fc.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from tensorflow.contrib.layers import regularizers
class FC(object):
    """
    This class implements a fully connected neural network in TensorFlow.
    It incorporates a certain graph model to be trained and to be used
    in inference.
    """

    def __init__(self, n_classes=10):
        """
        Constructor for an FC object. Default values should be used as hints for
        the usage of each parameter.

        Args:
            n_classes: int, number of classes of the classification problem.
                       This number is required in order to specify the
                       output dimensions of the network.
        """
        self.n_classes = n_classes

    def inference(self, x):
        """
        Performs inference given an input tensor. This is the central portion
        of the network where we describe the computation graph. Here an input
        tensor undergoes a series of linear transformations and nonlinear
        activations as defined in this method. For the details of the model,
        please see the assignment file.

        Here we recommend you to consider using variable and name scopes in order
        to make your graph more intelligible for later references in TensorBoard,
        and so on. You can define a name scope for the whole model or for each
        operator group (e.g. matmul+relu) individually to group them by name.
        Variable scopes are essential components in TensorFlow for parameter sharing.
        Although the model(s) which are within the scope of this class do not require
        parameter sharing, it is good practice to use a variable scope to encapsulate
        the model.

        Args:
            x: 4D float Tensor of size [batch_size, input_height, input_width, input_channels]
        Returns:
            logits: 2D float Tensor of size [batch_size, self.n_classes]. Returns
                    the logits outputs (before softmax transformation) of the
                    network. These logits can then be used with loss and accuracy
                    to evaluate the model.
        """
        with tf.variable_scope('FCC'):

            def _forward_fc_layer(name, w_shape, b_shape, x_inp, regularizer_strength, act_func):
                # One fully connected layer: matmul + bias followed by the given activation.
                with tf.variable_scope(name):
                    W = tf.get_variable('W', w_shape,
                                        initializer=tf.random_normal_initializer(mean=0.0, stddev=1e-3, dtype=tf.float32),
                                        regularizer=regularizers.l2_regularizer(regularizer_strength))
                    b = tf.get_variable('b', b_shape, initializer=tf.constant_initializer(0))
                    out = act_func(tf.matmul(x_inp, W) + b)
                    tf.histogram_summary(name + '_weights', W)
                    tf.histogram_summary(name + '_b', b)
                    tf.histogram_summary(name + '_out', out)
                    return out

            # Flatten the 4D input to [batch_size, 512] before the fully connected stack.
            self.flatten = tf.reshape(x, [-1, 512])
            self.fc1 = _forward_fc_layer(name='fc1', w_shape=[512, 384], b_shape=384,
                                         x_inp=self.flatten, regularizer_strength=0.001, act_func=tf.nn.relu)
            self.fc2 = _forward_fc_layer(name='fc2', w_shape=[384, 192], b_shape=192,
                                         x_inp=self.fc1, regularizer_strength=0.001, act_func=tf.nn.relu)
            # Final linear layer producing self.n_classes logits (no activation).
            logits = _forward_fc_layer(name='logits', w_shape=[192, self.n_classes], b_shape=self.n_classes,
                                       x_inp=self.fc2, regularizer_strength=0.001, act_func=lambda a: a)

        return logits
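
    # Note on the scopes above (an illustrative aside, not part of the assignment
    # text): the trainable parameters are created under names such as 'FCC/fc1/W'
    # and 'FCC/logits/b', so they can be listed with
    # tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES), and the histogram
    # summaries appear grouped per layer in TensorBoard.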

    def accuracy(self, logits, labels):
        """
        Calculate the prediction accuracy, i.e. the average correct predictions
        of the network.
        As in self.loss above, you can use tf.scalar_summary to save
        scalar summaries of accuracy for later use with TensorBoard.

        Args:
            logits: 2D float Tensor of size [batch_size, self.n_classes].
                    The predictions returned through self.inference.
            labels: 2D int Tensor of size [batch_size, self.n_classes]
                    with one-hot encoding. Ground truth labels for
                    each observation in batch.
        Returns:
            accuracy: scalar float Tensor, the accuracy of predictions,
                      i.e. the average correct predictions over the whole batch.
        """
        ########################
        # PUT YOUR CODE HERE #
        ########################
        # A prediction is correct when the argmax of the logits matches the
        # argmax of the one-hot labels; accuracy is the mean over the batch.
        correct_prediction = tf.equal(tf.argmax(labels, 1), tf.argmax(logits, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.scalar_summary('accuracy', accuracy)
        ########################
        # END OF YOUR CODE #
        ########################
        return accuracy

    def loss(self, logits, labels):
        """
        Calculates the multiclass cross-entropy loss from the logits predictions and
        the ground truth labels. The function will also add the regularization
        loss from the network weights to the total loss that is returned.
        In order to implement this function you should have a look at
        tf.nn.softmax_cross_entropy_with_logits.
        You can use tf.scalar_summary to save scalar summaries of
        cross-entropy loss, regularization loss, and full loss (both summed)
        for use with TensorBoard. This will be useful for compiling your report.

        Args:
            logits: 2D float Tensor of size [batch_size, self.n_classes].
                    The predictions returned through self.inference.
            labels: 2D int Tensor of size [batch_size, self.n_classes]
                    with one-hot encoding. Ground truth labels for each
                    observation in batch.
        Returns:
            loss: scalar float Tensor, full loss = cross_entropy + reg_loss
        """
        ########################
        # PUT YOUR CODE HERE #
        ########################
        # Mean softmax cross-entropy over the batch.
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels,
                                                                name='crossentropy')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.scalar_summary('cross-entropy loss', loss)
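        # Also fold in the regularization terms promised in the docstring. This is
        # a minimal sketch of one way to do it: the l2_regularizer attached to the
        # weights in inference() collects its terms in the REGULARIZATION_LOSSES
        # collection, so they can be summed and added to the cross-entropy here.
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        if reg_losses:
            reg_loss = tf.add_n(reg_losses, name='reg_loss')
            tf.scalar_summary('regularization loss', reg_loss)
            loss = tf.add(loss, reg_loss, name='full_loss')
            tf.scalar_summary('full loss', loss)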
        ########################
        # END OF YOUR CODE #
        ########################
        return loss
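

if __name__ == '__main__':
    # Minimal usage sketch, not part of the class above: build the graph, run a
    # single training step on random data, and print the resulting loss and
    # accuracy. The input shape [None, 4, 4, 32] is an assumption chosen only
    # because 4 * 4 * 32 = 512, the feature size that inference() flattens to;
    # the optimizer and learning rate are likewise illustrative choices.
    x = tf.placeholder(tf.float32, [None, 4, 4, 32], name='x')
    y = tf.placeholder(tf.float32, [None, 10], name='y')

    model = FC(n_classes=10)
    logits = model.inference(x)
    loss = model.loss(logits, y)
    accuracy = model.accuracy(logits, y)
    train_op = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        batch_x = np.random.randn(8, 4, 4, 32).astype(np.float32)
        batch_y = np.eye(10, dtype=np.float32)[np.random.randint(0, 10, size=8)]
        _, loss_val, acc_val = sess.run([train_op, loss, accuracy],
                                        feed_dict={x: batch_x, y: batch_y})
        print('loss: %.4f  accuracy: %.2f' % (loss_val, acc_val))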