@@ -565,8 +565,8 @@ def nnCostFunction(nn_params,input_layer_size,hidden_layer_size,num_labels,X,y,L
 # Gradient
 def nnGradient(nn_params,input_layer_size,hidden_layer_size,num_labels,X,y,Lambda):
     length = nn_params.shape[0]
-    Theta1 = nn_params[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,input_layer_size+1)
-    Theta2 = nn_params[hidden_layer_size*(input_layer_size+1):length].reshape(num_labels,hidden_layer_size+1)
+    Theta1 = nn_params[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,input_layer_size+1).copy()   # use copy() here; otherwise modifying Theta1 below would also modify nn_params
+    Theta2 = nn_params[hidden_layer_size*(input_layer_size+1):length].reshape(num_labels,hidden_layer_size+1).copy()
     m = X.shape[0]
     class_y = np.zeros((m,num_labels))      # y holds the digits 0-9; map them to 0/1 indicator rows
     # map y
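
The .copy() added above matters because a contiguous NumPy slice followed by reshape returns a view that shares memory with the original array, so the Theta1[:,0] = 0 / Theta2[:,0] = 0 writes later in this function would silently corrupt nn_params, the very vector the optimizer owns. A minimal sketch of that aliasing (the array names here are illustrative, not from the repo):

    import numpy as np

    params = np.arange(6.0)                  # stand-in for nn_params
    theta = params[0:4].reshape(2, 2)        # a view: shares memory with params
    theta[:, 0] = 0                          # writes through to params
    print(params)                            # [0. 1. 0. 3. 4. 5.]

    theta_safe = params[0:4].reshape(2, 2).copy()   # independent buffer
    theta_safe[:, 0] = 0                            # params is unchanged
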
@@ -581,9 +581,8 @@ def nnGradient(nn_params,input_layer_size,hidden_layer_size,num_labels,X,y,Lambd
 
     Theta1_grad = np.zeros((Theta1.shape))  # weights from layer 1 to layer 2
     Theta2_grad = np.zeros((Theta2.shape))  # weights from layer 2 to layer 3
-
-    Theta1[:,0] = 0;
-    Theta2[:,0] = 0;
+
+
     '''forward propagation: each layer needs an extra bias column of ones'''
     a1 = np.hstack((np.ones((m,1)),X))
     z2 = np.dot(a1,np.transpose(Theta1))
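
Moving Theta1[:,0] = 0 / Theta2[:,0] = 0 out of this spot is the substance of the fix. The bias columns are zeroed only so that the regularization term skips the bias weights, but doing it before the forward pass also removed the bias contribution from z2 and z3, so h and every delta were computed from corrupted weights. A non-destructive alternative would build separate regularization copies instead of mutating Theta1/Theta2 in place; a sketch under the same variable names, not what the commit actually does:

    # regularize without mutating Theta1/Theta2: zero only the bias column of a copy
    Theta1_reg = np.hstack((np.zeros((Theta1.shape[0], 1)), Theta1[:, 1:]))
    Theta2_reg = np.hstack((np.zeros((Theta2.shape[0], 1)), Theta2[:, 1:]))
    grad = (np.vstack((Theta1_grad.reshape(-1, 1), Theta2_grad.reshape(-1, 1)))
            + Lambda*np.vstack((Theta1_reg.reshape(-1, 1), Theta2_reg.reshape(-1, 1))))/m
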
@@ -592,15 +591,19 @@ def nnGradient(nn_params,input_layer_size,hidden_layer_size,num_labels,X,y,Lambd
     z3 = np.dot(a2,np.transpose(Theta2))
     h = sigmoid(z3)
 
+
     '''back propagation: delta is the error term'''
     delta3 = np.zeros((m,num_labels))
     delta2 = np.zeros((m,hidden_layer_size))
     for i in range(m):
-        delta3[i,:] = h[i,:]-class_y[i,:]
+        #delta3[i,:] = (h[i,:]-class_y[i,:])*sigmoidGradient(z3[i,:])  # error term for the squared-error cost
+        delta3[i,:] = h[i,:]-class_y[i,:]                              # error term for the cross-entropy cost
         Theta2_grad = Theta2_grad+np.dot(np.transpose(delta3[i,:].reshape(1,-1)),a2[i,:].reshape(1,-1))
         delta2[i,:] = np.dot(delta3[i,:].reshape(1,-1),Theta2_x)*sigmoidGradient(z2[i,:])
         Theta1_grad = Theta1_grad+np.dot(np.transpose(delta2[i,:].reshape(1,-1)),a1[i,:].reshape(1,-1))
 
+    Theta1[:,0] = 0
+    Theta2[:,0] = 0
     '''gradient'''
     grad = (np.vstack((Theta1_grad.reshape(-1,1),Theta2_grad.reshape(-1,1)))+Lambda*np.vstack((Theta1.reshape(-1,1),Theta2.reshape(-1,1))))/m
     return np.ravel(grad)
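
Two things are worth spelling out about this last hunk. First, the commented-out line documents why no sigmoidGradient factor appears at the output layer: with the cross-entropy cost the sigmoid derivative cancels, leaving delta3 = h - y, whereas the squared-error cost keeps it, delta3 = (h - y)*sigmoidGradient(z3). Second, zeroing the bias columns now happens only after back propagation, just before the regularized gradient, which is where it belongs. A numerical gradient check is the usual way to confirm a fix like this; a minimal sketch, assuming the nnCostFunction/nnGradient signatures above:

    import numpy as np

    def check_gradient(theta, args, eps=1e-4, n_checks=10, seed=0):
        # compare the backprop gradient to a centered finite difference
        # at a few randomly chosen coordinates of theta
        analytic = nnGradient(theta, *args)
        rng = np.random.default_rng(seed)
        for idx in rng.choice(theta.size, size=n_checks, replace=False):
            t_plus, t_minus = theta.copy(), theta.copy()
            t_plus[idx] += eps
            t_minus[idx] -= eps
            numeric = (nnCostFunction(t_plus, *args) - nnCostFunction(t_minus, *args))/(2*eps)
            print(idx, analytic[idx], numeric)   # the two values should match to several decimals

    # usage sketch:
    # check_gradient(nn_params, (input_layer_size, hidden_layer_size, num_labels, X, y, Lambda))
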