forked from 527515025/My-TensorFlow-tutorials
Showing 2 changed files with 106 additions and 0 deletions.
@@ -0,0 +1,31 @@
# The officially preprocessed dataset cannot be downloaded directly here,
# so the MNIST data is loaded through TensorFlow instead.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

batch_size = 128      # number of samples per gradient-descent batch
nb_classes = 10       # number of classes
nb_epoch = 10         # number of training epochs; each epoch passes over the full training set
image_size = 28 * 28  # input image size; grayscale images have a single channel

# Load the data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
x_train, y_train = mnist.train.images, mnist.train.labels
# print(x_train.shape, y_train.shape)  # (55000, 784) (55000, 10)
x_test, y_test = mnist.test.images, mnist.test.labels
# print(x_test.shape, y_test.shape)    # (10000, 784) (10000, 10)
# If y_train / y_test are not one-hot encoded, convert them first.

# Build the model: logistic classification is equivalent to a fully connected
# layer (Dense is Keras's fully connected layer).
model = Sequential([
    Dense(128, input_shape=(image_size,), activation='relu'),
    Dense(nb_classes, activation='softmax'),
])
# Configure the optimizer and the loss function
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1,
          validation_data=(x_test, y_test))
# evaluate() returns both the loss and the accuracy; score[1] is the accuracy.
score = model.evaluate(x_test, y_test, verbose=0)
print('Accuracy: {}'.format(score[1]))
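
The script above relies on the legacy Keras 1 API (`nb_epoch`) and the now-deprecated `tensorflow.examples.tutorials.mnist` loader. For reference, here is a minimal sketch of the same one-hidden-layer classifier written against the tf.keras API; it assumes TensorFlow 2.x is installed and is not part of the original commit.

# Hypothetical tf.keras equivalent (assumes TensorFlow 2.x); a sketch, not the original code.
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(128, activation="relu", input_shape=(784,)),
    tf.keras.layers.Dense(10, activation="softmax"),
])
model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))
print("Accuracy: {}".format(model.evaluate(x_test, y_test, verbose=0)[1]))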
@@ -0,0 +1,75 @@
# coding:utf-8

# Download the MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

import tensorflow as tf

# Hyperparameters
learning_rate = 0.001
train_epochs = 20
batch_size = 64

# Number of units in each layer of the 3-layer perceptron
n_input = 784
n_hidden1 = 100
n_hidden2 = 100
n_classes = 10

# Placeholders for the network inputs
x = tf.placeholder(tf.float32, shape=[None, n_input])
y = tf.placeholder(tf.float32, shape=[None, n_classes])

# Weights and biases
weights = {'h1': tf.Variable(tf.random_normal([n_input, n_hidden1])),
           'h2': tf.Variable(tf.random_normal([n_hidden1, n_hidden2])),
           'out': tf.Variable(tf.random_normal([n_hidden2, n_classes]))}

biases = {'b1': tf.Variable(tf.random_normal([n_hidden1])),
          'b2': tf.Variable(tf.random_normal([n_hidden2])),
          'out': tf.Variable(tf.random_normal([n_classes]))}

# Inference (forward pass)
def inference(input_x):
    layer_1 = tf.nn.relu(tf.matmul(input_x, weights['h1']) + biases['b1'])
    layer_2 = tf.nn.relu(tf.matmul(layer_1, weights['h2']) + biases['b2'])
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer

# Build the network
logits = inference(x)
prediction = tf.nn.softmax(logits)

# Loss function and optimizer
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss)

# Evaluation metric (accuracy)
pre_correct = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(pre_correct, tf.float32))

# Initialize all variables
init = tf.global_variables_initializer()

# Training
with tf.Session() as sess:
    sess.run(init)
    total_batch = int(mnist.train.num_examples / batch_size)

    for epoch in range(train_epochs):
        for batch in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            sess.run(train_op, feed_dict={x: batch_x, y: batch_y})

        if epoch % 10 == 0:
            loss_, acc = sess.run([loss, accuracy], feed_dict={x: batch_x, y: batch_y})
            print("epoch {}, loss {:.4f}, acc {:.3f}".format(epoch, loss_, acc))

    print("optimization finished!")

    # Accuracy on the test set
    test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
    print('test accuracy', test_acc)
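
If individual predictions are wanted after training, a short usage sketch like the one below could be appended inside the same tf.Session() block once the training loop has finished. It only reuses names defined above (sess, prediction, x, mnist) and is an illustrative addition, not part of the original commit.

    # Hypothetical usage sketch: classify the first few test images with the trained graph.
    sample_images = mnist.test.images[:5]
    sample_preds = sess.run(tf.argmax(prediction, 1), feed_dict={x: sample_images})
    print("predicted classes:", sample_preds)
    print("true classes:     ", mnist.test.labels[:5].argmax(axis=1))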