
Commit 296d4f6

Ivan Bogatyy committed Mar 23, 2017
Merge commit 296d4f6 (2 parents: 9a463f1 and 277f99c)
Showing 4 changed files with 7 additions and 6 deletions.
2 changes: 1 addition & 1 deletion inception/inception/slim/README.md
@@ -319,7 +319,7 @@ their use, consider the following example.
 def MyNewOp(inputs):
   varA = ...
   varB = ...
-  outputs = tf.mul(varA, inputs) + varB
+  outputs = tf.multiply(varA, inputs) + varB
   return outputs
 
 ```
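For reference, TensorFlow 1.0 removed tf.mul in favor of tf.multiply, with identical element-wise semantics. A runnable sketch of the updated snippet; the variable definitions, elided as ... in the README, are hypothetical stand-ins:

```python
import tensorflow as tf

def MyNewOp(inputs):
  # Hypothetical definitions; the README elides varA and varB with '...'.
  varA = tf.get_variable('varA', shape=[], initializer=tf.ones_initializer())
  varB = tf.get_variable('varB', shape=[], initializer=tf.zeros_initializer())
  # tf.multiply is the TF 1.0 name for the removed tf.mul (element-wise product).
  outputs = tf.multiply(varA, inputs) + varB
  return outputs
```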
5 changes: 3 additions & 2 deletions textsum/seq2seq_attention_model.py
@@ -227,8 +227,9 @@ def _add_seq2seq(self):
       def sampled_loss_func(inputs, labels):
         with tf.device('/cpu:0'):  # Try gpu.
           labels = tf.reshape(labels, [-1, 1])
-          return tf.nn.sampled_softmax_loss(w_t, v, inputs, labels,
-                                            hps.num_softmax_samples, vsize)
+          return tf.nn.sampled_softmax_loss(
+              weights=w_t, biases=v, labels=labels, inputs=inputs,
+              num_sampled=hps.num_softmax_samples, num_classes=vsize)
 
       if hps.num_softmax_samples != 0 and hps.mode == 'train':
         self._loss = seq2seq_lib.sampled_sequence_loss(
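For reference, TensorFlow 1.0 reordered tf.nn.sampled_softmax_loss to (weights, biases, labels, inputs, ...), which is why the call above switches to keyword arguments. A minimal standalone sketch of the new signature; the shapes and variable names are illustrative assumptions, not taken from the textsum code:

```python
import tensorflow as tf

vocab_size, hidden_size, num_sampled = 10000, 256, 64  # assumed sizes

# sampled_softmax_loss expects the output weights transposed: [vocab, hidden].
w_t = tf.get_variable("w_t", [vocab_size, hidden_size])
v = tf.get_variable("v", [vocab_size])

inputs = tf.placeholder(tf.float32, [None, hidden_size])  # decoder outputs
labels = tf.placeholder(tf.int64, [None, 1])              # target ids, [batch, 1]

loss = tf.nn.sampled_softmax_loss(
    weights=w_t, biases=v, labels=labels, inputs=inputs,
    num_sampled=num_sampled, num_classes=vocab_size)
```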
2 changes: 1 addition & 1 deletion tutorials/rnn/ptb/ptb_word_lm.py
@@ -110,7 +110,7 @@ def __init__(self, is_training, config, input_):
     # different than reported in the paper.
     def lstm_cell():
       return tf.contrib.rnn.BasicLSTMCell(
-          size, forget_bias=0.0, state_is_tuple=True, reuse=tf.get_variable_scope().reuse)
+          size, forget_bias=0.0, state_is_tuple=True)
     attn_cell = lstm_cell
     if is_training and config.keep_prob < 1:
       def attn_cell():
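For context, the reuse keyword on tf.contrib.rnn.BasicLSTMCell only appeared in a later TensorFlow release, so dropping it here keeps the tutorial compatible with TF 1.0 (an inference from the diff, not stated in the commit). A minimal sketch of the resulting cell construction, including the dropout wrapper the tutorial applies during training; size and keep_prob are assumed hyperparameters:

```python
import tensorflow as tf

size, keep_prob = 200, 0.5  # assumed hyperparameters

def lstm_cell():
  # No explicit reuse argument: variable sharing is governed by the
  # enclosing tf.variable_scope instead.
  return tf.contrib.rnn.BasicLSTMCell(size, forget_bias=0.0, state_is_tuple=True)

def attn_cell():
  # Training-time variant: the tutorial wraps the base cell with dropout.
  return tf.contrib.rnn.DropoutWrapper(lstm_cell(), output_keep_prob=keep_prob)
```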
4 changes: 2 additions & 2 deletions tutorials/rnn/translate/seq2seq_model.py
@@ -100,13 +100,13 @@ def __init__(self,
       b = tf.get_variable("proj_b", [self.target_vocab_size], dtype=dtype)
       output_projection = (w, b)
 
-      def sampled_loss(labels, inputs):
+      def sampled_loss(labels, logits):
         labels = tf.reshape(labels, [-1, 1])
         # We need to compute the sampled_softmax_loss using 32bit floats to
         # avoid numerical instabilities.
         local_w_t = tf.cast(w_t, tf.float32)
         local_b = tf.cast(b, tf.float32)
-        local_inputs = tf.cast(inputs, tf.float32)
+        local_inputs = tf.cast(logits, tf.float32)
         return tf.cast(
             tf.nn.sampled_softmax_loss(
                 weights=local_w_t,
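The rename from inputs to logits matters because the seq2seq library invokes this callback with named arguments; as of TF 1.1 the convention is softmax_loss_function(labels=..., logits=...), so the parameter names must match exactly. A minimal sketch under that assumption, with vocab_size, size, and num_samples as illustrative values rather than the tutorial's:

```python
import tensorflow as tf

vocab_size, size, num_samples = 40000, 512, 512  # assumed values
w_t = tf.get_variable("proj_w_t", [vocab_size, size])
b = tf.get_variable("proj_b", [vocab_size])

def sampled_loss(labels, logits):
  # Parameters must be named exactly 'labels' and 'logits', since the
  # library calls the callback with keyword arguments.
  labels = tf.reshape(labels, [-1, 1])
  return tf.nn.sampled_softmax_loss(
      weights=w_t, biases=b, labels=labels, inputs=logits,
      num_sampled=num_samples, num_classes=vocab_size)
```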
