Remove name arguments from tf.summary.scalar
nealwu committed Mar 14, 2017
1 parent 1fe7f45 commit b41ff7f
Showing 3 changed files with 7 additions and 12 deletions.
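
Why the change: in the TF 1.x summary API, tf.summary.scalar(name, tensor, ...) takes the summary name as its first positional argument, unlike the deprecated tf.scalar_summary op, which had a separate name keyword. Passing name=... alongside the positional name therefore collides with that parameter and raises a TypeError. A minimal sketch of the failure and the fix (assumes TF 1.x; the loss tensor below is a hypothetical stand-in, not taken from this diff):

    import tensorflow as tf

    loss = tf.constant(0.5)  # hypothetical stand-in for a real loss tensor

    # Old call style, carried over from tf.scalar_summary. The first positional
    # argument already binds to the name parameter, so the extra keyword fails:
    #   tf.summary.scalar('loss', loss, name='loss')
    #   => TypeError: got multiple values for argument 'name'

    # Fixed call, as in this commit:
    tf.summary.scalar('loss', loss)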
9 changes: 3 additions & 6 deletions slim/deployment/model_deploy.py
@@ -232,11 +232,9 @@ def _gather_clone_loss(clone, num_clones, regularization_losses):
     sum_loss = tf.add_n(all_losses)
   # Add the summaries out of the clone device block.
   if clone_loss is not None:
-    tf.summary.scalar(clone.scope + '/clone_loss', clone_loss,
-                      name='clone_loss')
+    tf.summary.scalar(clone.scope + '/clone_loss', clone_loss)
   if regularization_loss is not None:
-    tf.summary.scalar('regularization_loss', regularization_loss,
-                      name='regularization_loss')
+    tf.summary.scalar('regularization_loss', regularization_loss)
   return sum_loss


@@ -404,8 +402,7 @@ def deploy(config,

   if total_loss is not None:
     # Add total_loss to summary.
-    summaries.add(tf.summary.scalar('total_loss', total_loss,
-                                    name='total_loss'))
+    summaries.add(tf.summary.scalar('total_loss', total_loss))

   if summaries:
     # Merge all summaries together.
6 changes: 2 additions & 4 deletions slim/train_image_classifier.py
@@ -517,8 +517,7 @@ def clone_fn(batch_queue):
   with tf.device(deploy_config.optimizer_device()):
     learning_rate = _configure_learning_rate(dataset.num_samples, global_step)
     optimizer = _configure_optimizer(learning_rate)
-    summaries.add(tf.summary.scalar('learning_rate', learning_rate,
-                                    name='learning_rate'))
+    summaries.add(tf.summary.scalar('learning_rate', learning_rate))

   if FLAGS.sync_replicas:
     # If sync_replicas is enabled, the averaging will be done in the chief
@@ -543,8 +542,7 @@ def clone_fn(batch_queue):
         optimizer,
         var_list=variables_to_train)
     # Add total_loss to summary.
-    summaries.add(tf.summary.scalar('total_loss', total_loss,
-                                    name='total_loss'))
+    summaries.add(tf.summary.scalar('total_loss', total_loss))

     # Create gradient updates.
     grad_updates = optimizer.apply_gradients(clones_gradients,
4 changes: 2 additions & 2 deletions street/python/vgsl_model.py
@@ -369,7 +369,7 @@ def _AddOutputs(self, prev_layer, out_dims, out_func, num_classes):
     if self.mode == 'train':
       # Setup loss for training.
       self.loss = self._AddLossFunction(logits, height_in, out_dims, out_func)
-      tf.summary.scalar('loss', self.loss, name='loss')
+      tf.summary.scalar('loss', self.loss)
     elif out_dims == 0:
       # Be sure the labels match the output, even in eval mode.
       self.labels = tf.slice(self.labels, [0, 0], [-1, 1])
@@ -484,7 +484,7 @@ def _AddOptimizer(self, optimizer_type):
       opt = tf.train.AdamOptimizer(learning_rate=learn_rate_dec)
     else:
       raise ValueError('Invalid optimizer type: ' + optimizer_type)
-    tf.summary.scalar('learn_rate', learn_rate_dec, name='lr_summ')
+    tf.summary.scalar('learn_rate', learn_rate_dec)

     self.train_op = opt.minimize(
         self.loss, global_step=self.global_step, name='train')
