Commit 960baec: "修改版" (Modified version)
Timthony committed Nov 5, 2018 (1 parent: 84cfb64)
Showing 10 changed files with 361 additions and 16 deletions.
Six files changed mode from 100755 to 100644 with no content changes:
drive.py
keras_mnist_cnn_steps.py
train_model.py
zth_car_control.py
zth_collect_data.py
zth_drive.py
zth_process2.py (new file: 135 additions, 0 deletions)
@@ -0,0 +1,135 @@
# coding=utf-8
# Convert the raw .jpg images into the 299x299x3 numeric arrays expected by the
# Inception-v3 model, and split all images into training/validation/testing sets.
import glob
import os
import os.path
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile

##################################### 1. Constants #####################################

# Directory containing the raw input data.
INPUT_DATA = 'datasets/training_data'
# Output file path; the processed image data is saved in numpy format.
OUTPUT_FILE = 'datasets/processed_data.npy'
# Percentages of the data reserved for validation and testing.
VALIDATION_PRECENTAGE = 10
TEST_PRECENTAGE = 10

##################################### 2. Data processing #####################################

# Read the data and split it into training/validation/testing sets.
def create_image_lists(sess, testing_percentage, validation_percentage):
    sub_dirs = [x[0] for x in os.walk(INPUT_DATA)]
    is_root_dir = True

    # Initialize the datasets.
    training_images = []
    training_labels = []
    testing_images = []
    testing_labels = []
    validation_images = []
    validation_labels = []
    current_label = 0

    # Walk through all sub-directories.
    for sub_dir in sub_dirs:
        if is_root_dir:
            is_root_dir = False
            continue

        # Collect all image files in this sub-directory.
        extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
        file_list = []
        dir_name = os.path.basename(sub_dir)
        for extension in extensions:
            file_glob = os.path.join(INPUT_DATA, dir_name, '*.' + extension)
            file_list.extend(glob.glob(file_glob))
        if not file_list:
            continue
        print("processing:", dir_name)

        i = 0
        # Process the image data.
        for file_name in file_list:
            i += 1
            # Read and decode the image, then resize it to 299x299 so that the
            # Inception-v3 model can consume it.
            image_raw_data = gfile.FastGFile(file_name, 'rb').read()
            image = tf.image.decode_jpeg(image_raw_data)
            if image.dtype != tf.float32:
                image = tf.image.convert_image_dtype(image, dtype=tf.float32)
            image = tf.image.resize_images(image, [299, 299])
            image_value = sess.run(image)

            # Randomly assign the image to one of the three datasets.
            chance = np.random.randint(100)
            if chance < validation_percentage:
                validation_images.append(image_value)
                validation_labels.append(current_label)
            elif chance < (testing_percentage + validation_percentage):
                testing_images.append(image_value)
                testing_labels.append(current_label)
            else:
                training_images.append(image_value)
                training_labels.append(current_label)
            if i % 200 == 0:
                print(i, "images processed.")
        current_label += 1

    # Shuffle the training data for better training behavior; reusing the same
    # RNG state keeps images and labels aligned after shuffling.
    state = np.random.get_state()
    np.random.shuffle(training_images)
    np.random.set_state(state)
    np.random.shuffle(training_labels)

    return np.asarray([training_images, training_labels,
                       validation_images, validation_labels,
                       testing_images, testing_labels])


def main():
    # config = tf.ConfigProto(allow_soft_placement=True)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
    # config.gpu_options.allow_growth = True
    with tf.Session(config=tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)) as sess:
        processed_data = create_image_lists(sess, TEST_PRECENTAGE, VALIDATION_PRECENTAGE)
        # Save the processed data in numpy format.
        np.save(OUTPUT_FILE, processed_data)


if __name__ == '__main__':
    main()
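
For reference, a minimal sketch of reading the saved archive back, not part of this commit: the six datasets come back in the order returned by create_image_lists, and because the sub-arrays have different lengths NumPy stores them as an object array, which recent NumPy versions will only load with allow_pickle=True.

import numpy as np

# Loading sketch (assumes the six-element layout returned by create_image_lists above).
data = np.load('datasets/processed_data.npy', allow_pickle=True)
train_x, train_y, val_x, val_y, test_x, test_y = data
print(len(train_x), 'training,', len(val_x), 'validation,', len(test_x), 'testing images')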
zth_process_img.py (1 addition, 1 deletion; mode 100755 → 100644)
@@ -68,7 +68,7 @@ def process_img(img_path, key):
    train_imgs = train_imgs[1:, :]
    train_labels = train_labels[1:, :]
    file_name = str(int(time()))
    directory = "/Volumes/Seagate Expansion Drive/tianhangz/project/selfdrive/training_data_npz"
    directory = "training_data_npz"

    if not os.path.exists(directory):
        os.makedirs(directory)
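The write that follows these lines falls outside the hunk shown. As an illustration only, here is a self-contained sketch of the .npz round trip such a collection script typically performs; the key names train_imgs and train_labels are assumed, and the array shapes mirror what load_data in zth_train.py below expects.

import os
import numpy as np
from time import time

# Hypothetical sketch (assumed key names, not this repository's actual save code):
# write one recording run into training_data_npz, then read it back.
train_imgs = np.zeros((10, 120, 160, 3))     # dummy camera frames
train_labels = np.zeros((10, 5), 'float')    # dummy 5-way control labels
directory = "training_data_npz"
if not os.path.exists(directory):
    os.makedirs(directory)
file_path = os.path.join(directory, str(int(time())) + '.npz')
np.savez(file_path, train_imgs=train_imgs, train_labels=train_labels)

with np.load(file_path) as data:             # NpzFile supports the context manager
    imgs, labels = data['train_imgs'], data['train_labels']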
zth_train.py (10 additions, 15 deletions; mode 100755 → 100644)
@@ -29,7 +29,7 @@ def load_data():
    # load
    image_array = np.zeros((1, 120, 160, 3))  # initialization
    label_array = np.zeros((1, 5), 'float')
    training_data = glob.glob('/media/nkdx/Seagate Expansion Drive/tianhangz/project/selfdrive/training_data_npz/*.npz')
    training_data = glob.glob('training_data_npz/*.npz')
    # Match all files that fit the pattern and return them as a list.
    print("Matching done. Starting to load.")
    print("%d rounds in total." % len(training_data))
@@ -71,19 +71,13 @@ def build_model(keep_prob):
    model.add(Conv2D(24, (5, 5), activation='elu', strides=(2, 2)))
    model.add(Conv2D(36, (5, 5), activation='elu', strides=(2, 2)))
    model.add(Conv2D(48, (5, 5), activation='elu', strides=(2, 2)))

    #model.add(Dropout(0.5))
    model.add(Conv2D(64, (3, 3), activation='elu'))
    #model.add(Dropout(0.3))
    model.add(Conv2D(64, (3, 3), activation='elu'))
    model.add(Dropout(keep_prob))  # Dropout randomly disconnects a fraction (p) of the input neurons at each parameter update during training.
    model.add(Flatten())
    model.add(Dense(500, activation='elu'))
    #model.add(Dropout(0.1))
    #model.add(Dense(500, activation='elu'))
    model.add(Dense(250, activation='elu'))
    #model.add(Dropout(0.1))
    model.add(Dense(50, activation='elu'))
    #model.add(Dropout(0.1))
    #model.add(Dense(50, activation='elu'))
    model.add(Dense(5))
    model.summary()

@@ -93,25 +87,26 @@
def train_model(model, learning_rate, nb_epoch, samples_per_epoch,
                batch_size, X_train, X_valid, y_train, y_valid):
    # Only save the best model seen so far.
    checkpoint = ModelCheckpoint('model-{epoch:03d}.h5', monitor='val_loss',
    checkpoint = ModelCheckpoint('model-{epoch:03d}.h5',
                                 monitor='val_loss',
                                 verbose=0,
                                 save_best_only=True,
                                 mode='min')
    # EarlyStopping patience: once early stopping is triggered (e.g. the loss has not
    # decreased compared to the previous epoch), training stops after `patience` more epochs.
    # mode: one of 'auto', 'min', 'max'. In 'min' mode training stops when the monitored
    # value stops decreasing; in 'max' mode, when it stops increasing.
    early_stop = EarlyStopping(monitor='val_loss', min_delta=.0005, patience=4,
    early_stop = EarlyStopping(monitor='loss', min_delta=.0005, patience=10,
                               verbose=1, mode='min')
    tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=20, write_graph=True,
    tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=20, write_graph=True, write_grads=True,
                              write_images=True, embeddings_freq=0, embeddings_layer_names=None,
                              embeddings_metadata=None)
    # Compile the model: loss is the loss function, optimizer the optimization method,
    # and metrics a list of metrics for evaluating the network during training and testing.
    model.compile(loss='mean_squared_error', optimizer=Adam(lr=learning_rate), metrics=['accuracy'])
    model.compile(loss='mean_squared_error', optimizer=keras.optimizers.Adam(lr=learning_rate), metrics=['accuracy'])
    # Train the model: batch_size is the number of samples per gradient-descent batch,
    # epochs the number of training epochs, verbose whether to print log messages,
    # and validation_data the dataset used for validation.
    model.fit_generator(batch_generator(X_train, y_train, batch_size),
                        steps_per_epoch=samples_per_epoch/batch_size,
                        epochs=nb_epoch,
                        epochs = nb_epoch,
                        max_queue_size=1,
                        validation_data=batch_generator(X_valid, y_valid, batch_size),
                        validation_steps=len(X_valid)/batch_size,
@@ -153,7 +148,7 @@ def main():
    learning_rate = 0.0001
    nb_epoch = 100
    samples_per_epoch = 3000
    batch_size = 40
    batch_size = 30

    print('keep_prob = ', keep_prob)
    print('learning_rate = ', learning_rate)
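batch_generator is called in train_model above but is not included in this diff; below is a minimal sketch of a generator compatible with that fit_generator call, an assumption for illustration rather than the repository's actual implementation.

import numpy as np

# Hypothetical batch generator: shuffle once per pass and yield
# (images, labels) batches forever, as Keras's fit_generator requires.
def batch_generator(images, labels, batch_size):
    images = np.asarray(images)
    labels = np.asarray(labels)
    while True:
        idx = np.random.permutation(len(images))
        for start in range(0, len(images) - batch_size + 1, batch_size):
            batch = idx[start:start + batch_size]
            yield images[batch], labels[batch]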