
Commit c21d37a

softmax_sgd

1 parent 9965bcc commit c21d37a

2 files changed: +614 -0 lines changed
# -*- coding: utf-8 -*-
"""
@author:wepon
@blog:http://blog.csdn.net/u012162613/article/details/43157801
"""
__docformat__ = 'restructedtext en'

import cPickle
import gzip
import os
import sys
import time

import numpy

import theano
import theano.tensor as T

"""
Define the softmax regression model.
The deeplearning tutorial simply treats LogisticRegression as softmax;
the familiar two-class logistic regression is just LogisticRegression
with n_out=2.
"""
# Parameters:
# input: one batch of inputs; if a batch holds n_example samples, input has
#        shape (n_example, n_in)
# n_in: size of one sample; each MNIST sample is a 28*28 image, so n_in=784
# n_out: number of output classes; MNIST has the 10 digits 0~9, so n_out=10
class LogisticRegression(object):
    def __init__(self, input, n_in, n_out):

        # W has n_in rows and n_out columns, and b is an n_out-dimensional
        # vector, i.e. each output unit owns one column of W and one element
        # of b: WX+b.
        # W and b are theano.shared variables so the program can run on a GPU.
        self.W = theano.shared(
            value=numpy.zeros(
                (n_in, n_out),
                dtype=theano.config.floatX
            ),
            name='W',
            borrow=True
        )

        self.b = theano.shared(
            value=numpy.zeros(
                (n_out,),
                dtype=theano.config.floatX
            ),
            name='b',
            borrow=True
        )

        # input is (n_example, n_in) and W is (n_in, n_out), so their dot
        # product is (n_example, n_out); adding the bias b and passing the
        # result through T.nnet.softmax gives p_y_given_x, in which each row
        # holds the estimated class probabilities of one sample.
        # Note: b is an n_out vector added to an (n_example, n_out) matrix;
        # internally b is broadcast (conceptually replicated n_example times)
        # and added to every row of the matrix.
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)

        # argmax returns the index of the largest value; since the dataset is
        # MNIST, that index is exactly the class label. axis=1 works row-wise.
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)

        # params: the parameters of the model
        self.params = [self.W, self.b]

    # Cost function: the negative log-likelihood (NLL).
    # Since this is MSGD, training one batch at a time, y has shape
    # (n_example,) and y.shape[0] is the number of samples in the batch.
    # Writing LP for T.log(self.p_y_given_x),
    # LP[T.arange(y.shape[0]), y] yields
    # [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]],
    # and the final mean makes minibatch SGD use the average NLL of all
    # samples in the batch as its cost.
    def negative_log_likelihood(self, y):
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])

    # Error rate over one batch.
    def errors(self, y):
        # First check that y and y_pred have the same dimensionality,
        # i.e. that they hold the same number of samples.
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        # Then check that y is of integer type; if so, the mean of
        # T.neq(self.y_pred, y) is the error rate.
        # For example, if self.y_pred=[3,2,3,2,3,2] while in fact
        # y=[3,4,3,4,3,4], then T.neq(self.y_pred, y)=[0,1,0,1,0,1],
        # where 1 means unequal and 0 means equal, so
        # T.mean(T.neq(self.y_pred, y))=T.mean([0,1,0,1,0,1])=0.5,
        # i.e. an error rate of 50%.
        if y.dtype.startswith('int'):
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()
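

# A minimal numpy sketch (illustrative only; this helper is assumed for this
# write-up and is never called by the model code) of the three quantities the
# class defines: row-wise softmax probabilities, the NLL picked out by fancy
# indexing, and the zero-one error rate.
def _numpy_softmax_demo():
    X = numpy.random.randn(4, 784)                    # a toy batch of 4 "images"
    W = numpy.zeros((784, 10))                        # same zero init as the model
    b = numpy.zeros(10)
    scores = numpy.dot(X, W) + b                      # b broadcasts over the 4 rows
    e = numpy.exp(scores - scores.max(axis=1, keepdims=True))
    p_y_given_x = e / e.sum(axis=1, keepdims=True)    # each row sums to 1
    y = numpy.array([3, 1, 4, 1])                     # toy labels
    LP = numpy.log(p_y_given_x)
    nll = -numpy.mean(LP[numpy.arange(4), y])         # picks LP[i, y[i]] per row
    y_pred = p_y_given_x.argmax(axis=1)               # predicted class per row
    error_rate = numpy.mean(y_pred != y)              # fraction of mismatches
    return nll, error_rate                            # here: (log(10), 1.0)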
"""
92+
加载MNIST数据集
93+
"""
94+
def load_data(dataset):
95+
# dataset是数据集的路径,程序首先检测该路径下有没有MNIST数据集,没有的话就下载MNIST数据集
96+
#这一部分就不解释了,与softmax回归算法无关。
97+
data_dir, data_file = os.path.split(dataset)
98+
if data_dir == "" and not os.path.isfile(dataset):
99+
# Check if dataset is in the data directory.
100+
new_path = os.path.join(
101+
os.path.split(__file__)[0],
102+
"..",
103+
"data",
104+
dataset
105+
)
106+
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
107+
dataset = new_path
108+
109+
if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
110+
import urllib
111+
origin = (
112+
'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
113+
)
114+
print 'Downloading data from %s' % origin
115+
urllib.urlretrieve(origin, dataset)
116+
117+
print '... loading data'
118+
#以上是检测并下载数据集mnist.pkl.gz,不是本文重点。下面才是load_data的开始
119+
120+
#从"mnist.pkl.gz"里加载train_set, valid_set, test_set,它们都是包括label的
121+
#主要用到python里的gzip.open()函数,以及 cPickle.load()。
122+
#‘rb’表示以二进制可读的方式打开文件
123+
f = gzip.open(dataset, 'rb')
124+
train_set, valid_set, test_set = cPickle.load(f)
125+
f.close()
126+
127+
128+
#将数据设置成shared variables,主要时为了GPU加速,只有shared variables才能存到GPU memory中
129+
#GPU里数据类型只能是float。而data_y是类别,所以最后又转换为int返回
130+
def shared_dataset(data_xy, borrow=True):
131+
data_x, data_y = data_xy
132+
shared_x = theano.shared(numpy.asarray(data_x,
133+
dtype=theano.config.floatX),
134+
borrow=borrow)
135+
shared_y = theano.shared(numpy.asarray(data_y,
136+
dtype=theano.config.floatX),
137+
borrow=borrow)
138+
return shared_x, T.cast(shared_y, 'int32')
139+
140+
141+
test_set_x, test_set_y = shared_dataset(test_set)
142+
valid_set_x, valid_set_y = shared_dataset(valid_set)
143+
train_set_x, train_set_y = shared_dataset(train_set)
144+
145+
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
146+
(test_set_x, test_set_y)]
147+
return rval
148+
149+
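

# An illustrative usage sketch for load_data (a hypothetical helper for this
# write-up, not called anywhere; assumes mnist.pkl.gz is present in the
# working directory or downloadable):
def _load_data_example():
    datasets = load_data('mnist.pkl.gz')
    train_set_x, train_set_y = datasets[0]      # theano shared variables
    # The standard split is 50000 train / 10000 valid / 10000 test samples,
    # each a flattened 28*28 image, so this prints (50000, 784).
    print train_set_x.get_value(borrow=True).shape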
"""
150+
将该模型应用于MNIST
151+
"""
152+
def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
153+
dataset='mnist.pkl.gz',
154+
batch_size=600):
155+
#加载数据
156+
datasets = load_data(dataset)
157+
train_set_x, train_set_y = datasets[0]
158+
valid_set_x, valid_set_y = datasets[1]
159+
test_set_x, test_set_y = datasets[2]
160+
#计算有多少个minibatch,因为我们的优化算法是MSGD,是一个batch一个batch来计算cost的
161+
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
162+
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
163+
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
164+
165+
    ######################
    # BUILD THE MODEL    #
    ######################
    print '... building the model'

    # Symbolic variables: index is the minibatch index, x one batch of
    # training samples, y the corresponding labels.
    index = T.lscalar()
    x = T.matrix('x')
    y = T.ivector('y')

    # Define the classifier, initialized with x as its input.
    classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)

    # Define the cost in terms of y; x also enters, implicitly, through
    # classifier. That is the right way to think about it: the cost must come
    # from both x and y, since y alone cannot produce it.
    cost = classifier.negative_log_likelihood(y)

    # A note on theano.function: givens is a dictionary whose keys are x and
    # y and whose values follow the colons. When the function is called, x
    # and y are replaced concretely by their values, and the index appearing
    # in those values is supplied through inputs=[index].
    # For example, test_model(1) first instantiates x as
    # test_set_x[1 * batch_size: (1 + 1) * batch_size] and y as
    # test_set_y[1 * batch_size: (1 + 1) * batch_size], then evaluates
    # outputs=classifier.errors(y), which depends on y and implicitly on x,
    # so the concrete x and y from givens are passed in.
    test_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
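
    # To make the givens mechanism concrete: the toy function below is an
    # illustrative sketch assumed for this write-up (unused by training).
    # Calling _first_pixels(0) substitutes x with minibatch 0 of the training
    # set and returns the first pixel of each of its batch_size samples.
    _first_pixels = theano.function(
        inputs=[index],
        outputs=x[:, 0],
        givens={x: train_set_x[index * batch_size: (index + 1) * batch_size]}
    )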

    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    # Compute the gradient of the cost with respect to each parameter.
    g_W = T.grad(cost=cost, wrt=classifier.W)
    g_b = T.grad(cost=cost, wrt=classifier.b)

    # Update rules, following the gradient-descent update formula.
    updates = [(classifier.W, classifier.W - learning_rate * g_W),
               (classifier.b, classifier.b - learning_rate * g_b)]
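
    # The same updates can be built generically from classifier.params; the
    # list below is an equivalent, illustrative sketch (generic_updates is a
    # name assumed here and is not used by the training code) that scales to
    # models with more parameters:
    gparams = [T.grad(cost=cost, wrt=param) for param in classifier.params]
    generic_updates = [(param, param - learning_rate * gparam)
                       for param, gparam in zip(classifier.params, gparams)]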

    # train_model is analogous to test_model analyzed above, except that it
    # also has updates, following the updates list defined above.
    train_model = theano.function(
        inputs=[index],
        outputs=cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    ###############
    # TRAIN MODEL #
    ###############
    print '... training the model'

    patience = 5000
    patience_increase = 2
    # Improvement threshold: an improvement only counts as significant (and
    # extends patience) when the validation loss falls below 0.995 times the
    # previous best_validation_loss.
    improvement_threshold = 0.995
    # Setting validation_frequency this way guarantees a validation pass in
    # every epoch: e.g. with the defaults, n_train_batches = 50000/600 = 83
    # and patience/2 = 2500, so validation runs every min(83, 2500) = 83
    # batches, i.e. once per epoch.
    validation_frequency = min(n_train_batches, patience / 2)

    best_validation_loss = numpy.inf  # best (smallest) validation loss so far, initialized to infinity
    test_score = 0.
    start_time = time.clock()

    done_looping = False
    epoch = 0

    # The training loop proper. The while loop counts epochs; one epoch
    # sweeps through every batch, i.e. every image. The for loop walks over
    # the batches, training on one batch at a time: its body calls
    # train_model(minibatch_index), whose updates adjust the parameters.
    # The for loop also accumulates iter, the number of batches trained so
    # far; whenever iter hits a multiple of validation_frequency the model is
    # evaluated on the validation set. If the validation loss
    # this_validation_loss beats the previous best best_validation_loss,
    # best_validation_loss is updated and the model is also evaluated on the
    # test set; if this_validation_loss is additionally below
    # best_validation_loss * improvement_threshold, patience is extended.
    # Training ends after n_epochs epochs, or as soon as patience <= iter.
    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            minibatch_avg_cost = train_model(minibatch_index)
            # iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                # compute zero-one loss on validation set
                validation_losses = [validate_model(i)
                                     for i in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)

                print(
                    'epoch %i, minibatch %i/%i, validation error %f %%' %
                    (
                        epoch,
                        minibatch_index + 1,
                        n_train_batches,
                        this_validation_loss * 100.
                    )
                )

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    # improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss * \
                            improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    # test it on the test set

                    test_losses = [test_model(i)
                                   for i in xrange(n_test_batches)]
                    test_score = numpy.mean(test_losses)

                    print(
                        (
                            ' epoch %i, minibatch %i/%i, test error of'
                            ' best model %f %%'
                        ) %
                        (
                            epoch,
                            minibatch_index + 1,
                            n_train_batches,
                            test_score * 100.
                        )
                    )

            if patience <= iter:
                done_looping = True
                break

    # end of the while loop
    end_time = time.clock()
    print(
        (
            'Optimization complete with best validation score of %f %%,'
            ' with test performance %f %%'
        )
        % (best_validation_loss * 100., test_score * 100.)
    )
    print 'The code ran for %d epochs, with %f epochs/sec' % (
        epoch, 1. * epoch / (end_time - start_time))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.1fs' % ((end_time - start_time)))


if __name__ == '__main__':
    sgd_optimization_mnist()
