
Commit 3870619

Fixed broken link.
1 parent df02f32 · commit 3870619

1 file changed

README.md

Lines changed: 34 additions & 34 deletions
@@ -210,7 +210,7 @@ print(b.name) # prints "b:0"
TensorFlow introduces two different context managers to alter the name of tensors and variables. The first is tf.name_scope which modifies the name of tensors:

```python
-with tf.name_scope('scope'):
+with tf.name_scope("scope"):
  a = tf.get_variable(name="a", shape=[])
  print(a.name) # prints "a:0"

@@ -221,7 +221,7 @@ with tf.name_scope('scope'):
The other is tf.variable_scope which modifies the name of both tensors and variables:

```python
-with tf.variable_scope('scope'):
+with tf.variable_scope("scope"):
  a = tf.get_variable(name="a", shape=[])
  print(a.name) # prints "scope/a:0"

@@ -234,16 +234,16 @@ Note that there are two ways to define new variables in TensorFlow, by calling t
tf.get_variable enables variable sharing, which is useful when building neural network models. Calling tf.get_variable with a new name results in creating a new variable, but if a variable with the same name exists, it will raise a ValueError exception, telling us that re-declaring a variable is not allowed:

```python
-with tf.variable_scope('scope'):
+with tf.variable_scope("scope"):
  a1 = tf.get_variable(name="a", shape=[])
  a2 = tf.get_variable(name="a", shape=[]) # Disallowed
```

But what if we actually want to reuse a previously declared variable? Variable scopes also provide the functionality to do that:
```python
-with tf.variable_scope('scope'):
+with tf.variable_scope("scope"):
  a1 = tf.get_variable(name="a", shape=[])
-with tf.variable_scope('scope', reuse=True):
+with tf.variable_scope("scope", reuse=True):
  a2 = tf.get_variable(name="a", shape=[]) # OK
```

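A quick way to convince yourself that reuse hands back the very same variable (a minimal sketch, assuming the TF 1.x graph-mode semantics used throughout this README):

```python
import tensorflow as tf

with tf.variable_scope("scope"):
  a1 = tf.get_variable(name="a", shape=[])
with tf.variable_scope("scope", reuse=True):
  a2 = tf.get_variable(name="a", shape=[])

# Both handles refer to the same underlying variable object.
assert a1 is a2
print(a1.name, a2.name)  # prints "scope/a:0 scope/a:0"
```
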
@@ -552,20 +552,20 @@ import numpy as np
import PIL
import tensorflow as tf

-def visualize_labeled_images(images, labels, max_outputs=3, name='image'):
+def visualize_labeled_images(images, labels, max_outputs=3, name="image"):
  def _visualize_image(image, label):
    # Do the actual drawing in python
    fig = plt.figure(figsize=(3, 3), dpi=80)
    ax = fig.add_subplot(111)
    ax.imshow(image[::-1,...])
    ax.text(0, 0, str(label),
-      horizontalalignment='left',
-      verticalalignment='top')
+      horizontalalignment="left",
+      verticalalignment="top")
    fig.canvas.draw()

    # Write the plot as a memory file.
    buf = io.BytesIO()
-    data = fig.savefig(buf, format='png')
+    data = fig.savefig(buf, format="png")
    buf.seek(0)

    # Read the image and convert to numpy array
@@ -595,7 +595,7 @@ Note that since summaries are usually only evaluated once in a while (not per st
```python
import tensorflow as tf

-with tf.device(tf.DeviceSpec(device_type='CPU', device_index=0)):
+with tf.device(tf.DeviceSpec(device_type="CPU", device_index=0)):
  a = tf.random_uniform([1000, 100])
  b = tf.random_uniform([1000, 100])
  c = a + b
@@ -605,7 +605,7 @@ tf.Session().run(c)

The same thing can be done just as simply on a GPU:
```python
-with tf.device(tf.DeviceSpec(device_type='GPU', device_index=0)):
+with tf.device(tf.DeviceSpec(device_type="GPU", device_index=0)):
  a = tf.random_uniform([1000, 100])
  b = tf.random_uniform([1000, 100])
  c = a + b
@@ -618,7 +618,7 @@ split_b = tf.split(b, 2)

split_c = []
for i in range(2):
-  with tf.device(tf.DeviceSpec(device_type='GPU', device_index=i)):
+  with tf.device(tf.DeviceSpec(device_type="GPU", device_index=i)):
    split_c.append(split_a[i] + split_b[i])

c = tf.concat(split_c, axis=0)
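
For context, a minimal sketch of evaluating the result of this hunk (assuming a and b are the [1000, 100] tensors defined just above it):

```python
with tf.Session() as sess:
  result = sess.run(c)
# Two [500, 100] halves, added on separate GPUs, concatenated back together.
print(result.shape)  # (1000, 100)
```
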
@@ -633,7 +633,7 @@ def make_parallel(fn, num_gpus, **kwargs):

  out_split = []
  for i in range(num_gpus):
-    with tf.device(tf.DeviceSpec(device_type='GPU', device_index=i)):
+    with tf.device(tf.DeviceSpec(device_type="GPU", device_index=i)):
      with tf.variable_scope(tf.get_variable_scope(), reuse=i > 0):
        out_split.append(fn(**{k : v[i] for k, v in in_splits.items()}))

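To make the pattern concrete, a hedged usage sketch of make_parallel (it assumes the parts of the function not shown in this hunk split each keyword tensor into in_splits and concatenate out_split on the way out):

```python
def model(a, b):
  # Any per-shard computation; here just an element-wise add.
  return a + b

a = tf.random_uniform([1000, 100])
b = tf.random_uniform([1000, 100])
c = make_parallel(model, 2, a=a, b=b)  # one shard per GPU
```
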
@@ -772,7 +772,7 @@ def non_differentiable_entropy(logits):
  probs = tf.nn.softmax(logits)
  return tf.nn.softmax_cross_entropy_with_logits(labels=probs, logits=logits)

-w = tf.get_variable('w', shape=[5])
+w = tf.get_variable("w", shape=[5])
y = -non_differentiable_entropy(w)

opt = tf.train.AdamOptimizer()
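
Worth spelling out why this version is labeled non-differentiable (a hedged reading of the example): the 1.x softmax_cross_entropy_with_logits does not backpropagate into labels, and its gradient with respect to logits is softmax(logits) - labels, which here cancels out:

```python
# With labels = softmax(w), the only gradient path yields
# softmax(w) - probs = 0, so the optimizer has nothing to follow.
grad = tf.gradients(y, w)[0]
```
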
@@ -810,7 +810,7 @@ def entropy(logits, dim=-1):
  nplogp = probs * (tf.reduce_logsumexp(logits, dim, keep_dims=True) - logits)
  return tf.reduce_sum(nplogp, dim)

-w = tf.get_variable('w', shape=[5])
+w = tf.get_variable("w", shape=[5])
y = -entropy(w)

print(w.get_shape())
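
The nplogp line relies on the identity log p_i = logits_i - logsumexp(logits), so -p_i * log(p_i) = p_i * (logsumexp(logits) - logits_i). A quick NumPy sanity check of that identity (a sketch, not part of the original README):

```python
import numpy as np

l = np.array([1.0, 2.0, 3.0])
p = np.exp(l) / np.exp(l).sum()   # softmax
lse = np.log(np.exp(l).sum())     # logsumexp
print(np.allclose(-(p * np.log(p)).sum(), (p * (lse - l)).sum()))  # True
```
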
@@ -966,7 +966,7 @@ estimator.fit(input_fn=input_fn, max_steps=...)

and to evaluate the model, call Estimator.evaluate(), providing a set of metrics:
```
-metrics = { 'accuracy': tf.metrics.accuracy }
+metrics = { "accuracy": tf.metrics.accuracy }
estimator.evaluate(input_fn=input_fn, metrics=metrics)
```

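For completeness, a hedged sketch of getting predictions out of the same estimator (this assumes tf.contrib.learn.Estimator's predict method, matching the fit/evaluate calls above):

```python
predictions = estimator.predict(input_fn=input_fn)
for p in predictions:
  print(p)
```
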
@@ -989,9 +989,9 @@ An even higher level way of running experiments is by using learn_runner.run() f
```python
import tensorflow as tf

-tf.flags.DEFINE_string('output_dir', '', 'Optional output dir.')
-tf.flags.DEFINE_string('schedule', 'train_and_evaluate', 'Schedule.')
-tf.flags.DEFINE_string('hparams', '', 'Hyper parameters.')
+tf.flags.DEFINE_string("output_dir", "", "Optional output dir.")
+tf.flags.DEFINE_string("schedule", "train_and_evaluate", "Schedule.")
+tf.flags.DEFINE_string("hparams", "", "Hyper parameters.")

FLAGS = tf.flags.FLAGS
learn = tf.contrib.learn
@@ -1016,10 +1016,10 @@ def main(unused_argv):
      schedule=FLAGS.schedule,
      hparams=hparams)

-if __name__ == '__main__':
+if __name__ == "__main__":
  tf.app.run()
```
-The schedule flag decides which member function of the Experiment object gets called. So if, for example, you set schedule to 'train_and_evaluate', experiment.train_and_evaluate() would be called.
+The schedule flag decides which member function of the Experiment object gets called. So if, for example, you set schedule to "train_and_evaluate", experiment.train_and_evaluate() would be called.

Now let's have a look at how we might actually write an input function. One way to do this is through python ops (see [this item](#python_ops) for more information on python ops).
```python
@@ -1044,8 +1044,8 @@ An alternative way is to write your data as TFRecords format and use the multi-t
```python
def input_fn():
  features = {
-    'image': tf.FixedLenFeature([], tf.string),
-    'label': tf.FixedLenFeature([], tf.int64),
+    "image": tf.FixedLenFeature([], tf.string),
+    "label": tf.FixedLenFeature([], tf.int64),
  }
  tensors = tf.contrib.learn.read_batch_features(
    file_pattern=...,
@@ -1055,29 +1055,29 @@ def input_fn():
```
See [mnist.py](https://github.com/vahidk/EffectiveTensorFlow/blob/master/code/framework/dataset/mnist.py) for an example of how to convert your data to TFRecords format.

-The framework also comes with a simple convolutional network classifier in [convnet_classifier.py](https://github.com/vahidk/EffectiveTensorFlow/blob/master/code/framework/model/convnet_classifier.py) that includes an example model and evaluation metric:
+The framework also comes with a simple convolutional network classifier in [cnn_classifier.py](https://github.com/vahidk/EffectiveTensorFlow/blob/master/code/framework/model/cnn_classifier.py) that includes an example model and evaluation metric:

```python
def model_fn(features, labels, mode, params):
-  images = features['image']
-  labels = labels['label']
+  images = features["image"]
+  labels = labels["label"]

  predictions = ...
  loss = ...

-  return {'predictions': predictions}, loss
+  return {"predictions": predictions}, loss

def eval_metrics_fn(params):
  return {
-    'accuracy': tf.contrib.learn.MetricSpec(tf.metrics.accuracy)
+    "accuracy": tf.contrib.learn.MetricSpec(tf.metrics.accuracy)
  }
```
MetricSpec connects our model to the given metric function (e.g. tf.metrics.accuracy). Since our label and predictions each consist of a single tensor, everything works automagically. If your label/prediction includes multiple tensors, however, you need to explicitly specify which tensors you want to pass to the metric function:
```python
tf.contrib.learn.MetricSpec(
  tf.metrics.accuracy,
-  label_key='label',
-  prediction_key='predictions')
+  label_key="label",
+  prediction_key="predictions")
```

And that's it! This is all you need to get started with the TensorFlow learn API. I recommend having a look at the [source code](https://github.com/vahidk/EffectiveTensorFlow/tree/master/code/framework) and the official Python API to learn more about the learn API.
@@ -1123,7 +1123,7 @@ def batch_gather(tensor, indices):
  return output

def rnn_beam_search(update_fn, initial_state, sequence_length, beam_width,
-                    begin_token_id, end_token_id, name='rnn'):
+                    begin_token_id, end_token_id, name="rnn"):
  """Beam-search decoder for recurrent models.

  Args:
@@ -1201,13 +1201,13 @@ def merge(tensors, units, activation=tf.nn.relu, name=None, **kwargs):
    tensors: A list of tensors with the same rank.
    units: Number of units in the projection function.
  """
-  with tf.variable_scope(name, default_name='merge'):
+  with tf.variable_scope(name, default_name="merge"):
    # Apply linear projection to input tensors.
    projs = []
    for i, tensor in enumerate(tensors):
      proj = tf.layers.dense(
        tensor, units, activation=None,
-        name='proj_%d' % i,
+        name="proj_%d" % i,
        **kwargs)
      projs.append(proj)

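As a usage sketch (hypothetical placeholder tensors; it also assumes merge(...) finishes by combining the projections, e.g. summing them, which this hunk does not show):

```python
x = tf.placeholder(tf.float32, [None, 100])
y = tf.placeholder(tf.float32, [None, 200])
z = merge([x, y], units=32)  # both inputs projected to 32 units, then combined
```
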
@@ -1260,7 +1260,7 @@ def make_parallel(fn, num_gpus, **kwargs):

  out_split = []
  for i in range(num_gpus):
-    with tf.device(tf.DeviceSpec(device_type='GPU', device_index=i)):
+    with tf.device(tf.DeviceSpec(device_type="GPU", device_index=i)):
      with tf.variable_scope(tf.get_variable_scope(), reuse=i > 0):
        out_split.append(fn(**{k : v[i] for k, v in in_splits.items()}))

