I worked through the "planar data classification with one hidden layer" exercise by hand. In my view, when you start writing deep-learning code yourself, the main difficulty lies in the figure below, i.e., the derivation of the backpropagation formulas, so I derive a few of the key ones. I haven't learned this editor's formula tools, so I just pasted screenshots from my Word notes (MathType is still the best!).
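For readers who can't see the screenshots, here is a reconstruction of those key formulas, written to match the code below (tanh hidden activation, sigmoid output, cross-entropy cost); this is my own transcription, not a copy of the original notes:

```latex
% Forward pass: tanh hidden layer, sigmoid output
Z^{[1]} = W^{[1]} X + b^{[1]}, \qquad A^{[1]} = \tanh\!\left(Z^{[1]}\right)
Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}, \qquad A^{[2]} = \sigma\!\left(Z^{[2]}\right)

% Cross-entropy cost over m examples
J = -\frac{1}{m} \sum_{i=1}^{m} \left[ y^{(i)} \log a^{[2](i)}
    + \left(1 - y^{(i)}\right) \log\left(1 - a^{[2](i)}\right) \right]

% Backward pass, using \sigma' = \sigma(1-\sigma) and \tanh' = 1 - \tanh^{2}
dZ^{[2]} = A^{[2]} - Y
dW^{[2]} = \tfrac{1}{m}\, dZ^{[2]} A^{[1]\top}, \qquad
db^{[2]} = \tfrac{1}{m} \textstyle\sum_{i} dZ^{[2](i)}
dZ^{[1]} = \left(W^{[2]\top} dZ^{[2]}\right) \odot \left(1 - \left(A^{[1]}\right)^{2}\right)
dW^{[1]} = \tfrac{1}{m}\, dZ^{[1]} X^{\top}, \qquad
db^{[1]} = \tfrac{1}{m} \textstyle\sum_{i} dZ^{[1](i)}
```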
```python
import numpy as np
import matplotlib.pyplot as plt
from testCase import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets

np.random.seed(1)

X, Y = load_planar_dataset()
shape_X = X.shape
shape_Y = Y.shape
m = Y.shape[1]
print("X dimension: " + str(shape_X))
print("Y dimension: " + str(shape_Y))
print("number of examples: " + str(m))


def layer_sizes(X, Y):
    n_x = X.shape[0]  # input layer size
    n_h = 4           # hidden layer size
    n_y = Y.shape[0]  # output layer size
    return (n_x, n_h, n_y)


def initialize_parameters(n_x, n_h, n_y):
    np.random.seed(2)
    # Small random weights break symmetry; biases can start at zero
    W1 = np.random.randn(n_h, n_x) * 0.01
    b1 = np.zeros(shape=(n_h, 1))
    W2 = np.random.randn(n_y, n_h) * 0.01
    b2 = np.zeros(shape=(n_y, 1))
    parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
    return parameters


def forward_propagation(X, parameters):
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)
    cache = {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
    return (A2, cache)


def compute_cost(A2, Y, parameters):
    m = Y.shape[1]
    # Cross-entropy cost, averaged over the m examples
    logprobs = np.multiply(np.log(A2), Y) + np.multiply((1 - Y), np.log(1 - A2))
    cost = -np.sum(logprobs) / m
    cost = float(np.squeeze(cost))
    return cost


def backward_propagation(parameters, cache, X, Y):
    m = X.shape[1]
    W2 = parameters["W2"]
    A1 = cache["A1"]
    A2 = cache["A2"]
    dZ2 = A2 - Y  # S' = S(1 - S) cancels against the cross-entropy derivative
    dW2 = (1 / m) * np.dot(dZ2, A1.T)
    db2 = (1 / m) * np.sum(dZ2, axis=1, keepdims=True)
    dZ1 = np.multiply(np.dot(W2.T, dZ2), 1 - np.power(A1, 2))  # tanh' = 1 - tanh^2
    dW1 = (1 / m) * np.dot(dZ1, X.T)
    db1 = (1 / m) * np.sum(dZ1, axis=1, keepdims=True)
    grads = {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2}
    return grads


def update_parameters(parameters, grads, learning_rate=1.2):
    W1, W2 = parameters["W1"], parameters["W2"]
    b1, b2 = parameters["b1"], parameters["b2"]
    dW1, dW2 = grads["dW1"], grads["dW2"]
    db1, db2 = grads["db1"], grads["db2"]
    # One step of gradient descent
    W1 = W1 - learning_rate * dW1
    b1 = b1 - learning_rate * db1
    W2 = W2 - learning_rate * dW2
    b2 = b2 - learning_rate * db2
    parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
    return parameters


def nn_model(X, Y, n_h, num_iterations, print_cost=False):
    np.random.seed(3)
    n_x = layer_sizes(X, Y)[0]
    n_y = layer_sizes(X, Y)[2]
    parameters = initialize_parameters(n_x, n_h, n_y)
    for i in range(num_iterations):
        A2, cache = forward_propagation(X, parameters)
        cost = compute_cost(A2, Y, parameters)
        grads = backward_propagation(parameters, cache, X, Y)
        parameters = update_parameters(parameters, grads, learning_rate=0.5)
        if print_cost and i % 1000 == 0:
            print("Iteration", i, "cost: " + str(cost))
    return parameters


def predict(parameters, X):
    A2, cache = forward_propagation(X, parameters)
    predictions = np.round(A2)  # threshold the sigmoid output at 0.5
    return predictions


parameters = nn_model(X, Y, n_h=4, num_iterations=10000, print_cost=True)
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
plt.title("Decision Boundary for hidden layer size " + str(4))
predictions = predict(parameters, X)
print('Accuracy: %d' % float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100) + '%')
```
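Since the backpropagation formulas are the part most likely to go wrong, a numerical gradient check is a handy sanity test. The sketch below is my own addition, not part of the course code: `check_dW2` is a hypothetical helper that perturbs `W2` element by element with a central finite difference and compares the result against the analytic `dW2` from `backward_propagation`; the name and the `epsilon` value are assumptions.

```python
# Minimal finite-difference gradient check for dW2 (sketch; not course code).
def check_dW2(parameters, X, Y, epsilon=1e-7):
    A2, cache = forward_propagation(X, parameters)
    dW2 = backward_propagation(parameters, cache, X, Y)["dW2"]  # analytic gradient
    num_dW2 = np.zeros_like(dW2)
    W2 = parameters["W2"]
    for i in range(W2.shape[0]):
        for j in range(W2.shape[1]):
            orig = W2[i, j]
            W2[i, j] = orig + epsilon  # J(W2 + eps)
            cost_plus = compute_cost(forward_propagation(X, parameters)[0], Y, parameters)
            W2[i, j] = orig - epsilon  # J(W2 - eps)
            cost_minus = compute_cost(forward_propagation(X, parameters)[0], Y, parameters)
            W2[i, j] = orig            # restore the original weight
            num_dW2[i, j] = (cost_plus - cost_minus) / (2 * epsilon)
    # Relative difference should be tiny (e.g. < 1e-6) if backprop is correct
    diff = np.linalg.norm(dW2 - num_dW2) / (np.linalg.norm(dW2) + np.linalg.norm(num_dW2))
    print("dW2 relative difference:", diff)

check_dW2(initialize_parameters(*layer_sizes(X, Y)), X, Y)
```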