Main.py
import tensorflow as tf
from multiGPU_Framework import get_cpu_variable, get_cpu_variable_shape, multiGPU_Framework
# Placeholders are collected here so the framework can feed them during training
placeholder_list = []

def loss_prediction_function():
    '''
    MNIST multi-GPU example.

    :param: None
    :return: loss, predictions

    Notes:
    1. Use get_cpu_variable instead of tf.Variable, or get_cpu_variable_shape
       to specify the shape explicitly (in case the initializer doesn't accept
       a shape argument).
    2. Add the loss to a collection:
       tf.add_to_collection('losses', loss)
    '''
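    # Illustration of note 1 (assumed helper signatures, not taken from the
    # framework's docs): CPU-shared variables might be created roughly as
    #   W = get_cpu_variable(tf.zeros([784, 10]))
    #   b = get_cpu_variable_shape(tf.zeros_initializer(), shape=[10])
    # whereas this example uses plain tf.Variable below.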
    # Placeholders for flattened 28x28 images and one-hot labels
    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])
    placeholder_list.append(x)
    placeholder_list.append(y_)

    # Create the model: a single linear layer followed by softmax
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, W) + b
    # Prediction and cross-entropy loss; the loss is also added to the
    # 'losses' collection as the framework expects (note 2 in the docstring)
    predictions = tf.nn.softmax(y)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    tf.add_to_collection('losses', loss)
    return loss, predictions

# Read MNIST (downloaded into data_dir on first run) and train via the framework
from tensorflow.examples.tutorials.mnist import input_data

data_dir = '/home/mvidyasa/Downloads/Multi_GPU_Framework-master/mnist'
mnist = input_data.read_data_sets(data_dir, one_hot=True)
batch_generator = mnist.train.next_batch

mgf = multiGPU_Framework(loss_prediction_function=loss_prediction_function)
mgf.train_model(num_epochs=1000,
                placeholders=placeholder_list,
                learning_rate=0.001,
                batch_generator=batch_generator,
                batch_size=100)
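
# batch_generator contract (an assumption inferred from the call above, not from
# framework documentation): train_model presumably calls batch_generator(batch_size)
# and expects an (inputs, labels) pair matching placeholder_list, which is what
# mnist.train.next_batch returns. A hypothetical drop-in replacement backed by
# in-memory numpy arrays could look like:
#
#   import numpy as np
#   images, labels = mnist.train.images, mnist.train.labels
#   def numpy_batch_generator(batch_size):
#       idx = np.random.randint(0, images.shape[0], size=batch_size)
#       return images[idx], labels[idx]
#
#   mgf.train_model(num_epochs=1000, placeholders=placeholder_list,
#                   learning_rate=0.001, batch_generator=numpy_batch_generator,
#                   batch_size=100)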