forked from dragen1860/TensorFlow-2.x-Tutorials
-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathmain.py
121 lines (80 loc) · 3.35 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
import os

# NOTE: TF_CPP_MIN_LOG_LEVEL must be set BEFORE TensorFlow is imported,
# otherwise the C++ logging filter is never applied.  '2' silences INFO
# and WARNING messages from the native runtime.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import time

import six
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow import keras

from model import RNNColorbot
from utils import load_dataset, parse

# Seed both RNGs so training runs are reproducible.
tf.random.set_seed(22)
np.random.seed(22)

# This script relies on TF 2.x eager-mode APIs (GradientTape, Datasets).
assert tf.__version__.startswith('2.')
def test(model, eval_data):
    """Print the mean squared error of `model` averaged over `eval_data`.

    `eval_data` yields (labels, chars, sequence_length) batches; the model
    is invoked in inference mode (training=False).
    """
    mean_loss = keras.metrics.Mean()
    for labels, chars, sequence_length in eval_data:
        batch_preds = model((chars, sequence_length), training=False)
        batch_mse = keras.losses.mean_squared_error(labels, batch_preds)
        mean_loss.update_state(batch_mse)
    print("eval/loss: %.6f" % mean_loss.result().numpy())
def train_one_epoch(model, optimizer, train_data, log_interval=10):
    """Run one training pass over `train_data`, updating `model` in place.

    Args:
        model: callable mapping ((chars, sequence_length), training=...) to
            predictions with the same leading shape as `labels`.
        optimizer: Keras optimizer used to apply the gradients.
        train_data: iterable of (labels, chars, sequence_length) batches.
        log_interval: print the current loss every `log_interval` steps.
    """
    for step, (labels, chars, sequence_length) in enumerate(train_data):
        with tf.GradientTape() as tape:
            predictions = model((chars, sequence_length), training=True)
            # Reduce the per-example MSE to a scalar for differentiation.
            loss = tf.reduce_mean(
                keras.losses.mean_squared_error(labels, predictions))
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        # BUG FIX: was hard-coded `step % 100`, silently ignoring the
        # log_interval argument (main() passes 50).
        if step % log_interval == 0:
            print(step, 'loss:', float(loss))
# CSV datasets of (color name, R, G, B) rows hosted in the
# tensorflow-workshop repository; downloaded on demand by load_dataset().
SOURCE_TRAIN_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/train.csv"
SOURCE_TEST_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/test.csv"
def main():
    """Train the RNNColorbot, then interactively predict RGB for color names.

    Downloads the train/eval datasets, trains for a fixed number of epochs
    (evaluating every 10th), then loops reading color names from stdin and
    saving a swatch image `<name>.png` for each prediction.
    """
    batchsz = 64
    rnn_cell_sizes = [256, 128]
    epochs = 40
    data_dir = os.path.join('.', "data")

    train_data = load_dataset(
        data_dir=data_dir, url=SOURCE_TRAIN_URL, batch_size=batchsz)
    eval_data = load_dataset(
        data_dir=data_dir, url=SOURCE_TEST_URL, batch_size=batchsz)

    model = RNNColorbot(
        rnn_cell_sizes=rnn_cell_sizes,
        label_dimension=3,  # one output per RGB channel
        keep_prob=0.5)
    optimizer = keras.optimizers.Adam(0.01)

    for epoch in range(epochs):
        start = time.time()
        train_one_epoch(model, optimizer, train_data, 50)
        # FIX: `start`/`end` were computed but the report was commented out,
        # leaving dead code; restore the per-epoch timing print.
        print("train/time for epoch #%d: %.2f" % (epoch, time.time() - start))
        if epoch % 10 == 0:
            test(model, eval_data)

    print("Colorbot is ready to generate colors!")
    while True:
        try:
            # TF 2.x requires Python 3, so the builtin input() replaces
            # six.moves.input with identical behavior.
            color_name = input("Give me a color name (or press enter to exit): ")
        except EOFError:
            return
        if not color_name:
            return

        _, chars, length = parse(color_name)
        # Add a leading batch dimension of 1 so the model sees a batch.
        chars = tf.expand_dims(tf.identity(chars), 0)
        length = tf.expand_dims(tf.identity(length), 0)
        preds = tf.unstack(model((chars, length), training=False)[0])

        # Predictions cannot be negative, as they are generated by a ReLU
        # layer; they may, however, exceed 1, so clip from above before
        # scaling to the 0-255 byte range.
        clipped_preds = tuple(min(float(p), 1.0) for p in preds)
        rgb = tuple(int(p * 255) for p in clipped_preds)
        print("rgb:", rgb)

        data = [[clipped_preds]]  # 1x1 "image" of the predicted color
        plt.imshow(data)
        plt.title(color_name)
        plt.savefig(color_name + '.png')
# Entry point: run training + the interactive loop only when executed as a
# script, not when this module is imported.
if __name__ == "__main__":
    main()