A neural network written with numpy


I've been teaching myself neural networks lately and wanted to try a hands-on implementation. I couldn't find a Python version online, so I tried writing one myself.

# encoding: utf-8
import math

import numpy as np

# 20 four-dimensional samples in [0.1, 0.9); the network below is trained
# to reconstruct them (autoencoder style).
testData = np.arange(0.1, 0.9, 0.01).reshape(-1, 4)

# XOR-style training patterns (columns are samples); unused by test() below.
td = np.array([[0.1, 0.9, 0.9],
               [0.9, 0.1, 0.9],
               [0.1, 0.1, 0.1],
               [0.9, 0.9, 0.1]])
td = np.transpose(td)


def addBias(arr):
    """Append a row of ones to arr (one bias input per sample column)."""
    b = np.ones((1, arr.shape[1]))
    return np.vstack((arr, b))


testData = np.transpose(testData)  # columns are samples
# testData = addBias(testData)


def sigmoid(x):
    # Note: math.exp overflows for x < -709; clamp x here if inputs can be large.
    return 1.0 / (1.0 + math.exp(-x))


def dsigmoid(y):
    # Derivative of the sigmoid expressed through its output y = sigmoid(x).
    return y * (1.0 - y)


sigmoid_ufunc = np.vectorize(sigmoid, otypes=[float])
dsigmoid_ufunc = np.vectorize(dsigmoid, otypes=[float])


class Bpnn(object):
    """Three-layer back-propagation network: ni inputs, nh hidden units, no outputs."""

    def __init__(self, ni, nh, no):
        super(Bpnn, self).__init__()
        self.ni = ni
        self.nh = nh
        self.no = no
        self.alpha = 0.2  # learning rate
        self.beta = 0.5   # L2 weight-decay coefficient
        self.a1 = self.a2 = self.a3 = 0
        self.z2 = self.z3 = 0
        self.d2 = self.d3 = 0
        self.gradw1 = self.gradw2 = 0
        self.gradb1 = self.gradb2 = 0
        # Weights and biases initialized uniformly in [-0.1, 0.1).
        self.w1 = np.random.rand(nh, ni) * 0.2 - 0.1  # input -> hidden
        self.w2 = np.random.rand(no, nh) * 0.2 - 0.1  # hidden -> output
        self.b1 = np.random.rand(nh, 1) * 0.2 - 0.1
        self.b2 = np.random.rand(no, 1) * 0.2 - 0.1
        # All-ones weights, handy for checking the arithmetic by hand:
        # self.w1 = np.ones((nh, ni))
        # self.w2 = np.ones((no, nh))

    def feedforward(self, x):
        self.a1 = x
        self.z2 = np.dot(self.w1, x) + self.b1
        self.a2 = sigmoid_ufunc(self.z2)
        self.z3 = np.dot(self.w2, self.a2) + self.b2
        self.a3 = sigmoid_ufunc(self.z3)

    def backPropagate(self, y):
        # d3 = -(y - a3) * f'(z3)
        self.d3 = -(y - self.a3) * dsigmoid_ufunc(self.a3)
        # d2 = (w2^T d3) * f'(z2)
        self.d2 = np.dot(np.transpose(self.w2), self.d3) * dsigmoid_ufunc(self.a2)
        # gradw2 = d3 a2^T + beta * w2 (data term plus weight decay)
        self.gradw2 = np.dot(self.d3, np.transpose(self.a2)) + self.beta * self.w2
        self.gradb2 = self.d3
        # gradw1 = d2 a1^T + beta * w1
        self.gradw1 = np.dot(self.d2, np.transpose(self.a1)) + self.beta * self.w1
        self.gradb1 = self.d2

    def weightUpdate(self, x, y):
        self.feedforward(x)
        self.backPropagate(y)
        # Uncomment to replace the analytic gradients with numeric ones:
        # self.gradw2 = self.dcheck(self.w2, x, y)
        # self.gradb2 = self.dcheck(self.b2, x, y)
        # self.gradw1 = self.dcheck(self.w1, x, y)
        # self.gradb1 = self.dcheck(self.b1, x, y)
        self.w2 = self.w2 - self.gradw2 * self.alpha
        self.w1 = self.w1 - self.gradw1 * self.alpha
        self.b2 = self.b2 - self.gradb2 * self.alpha
        self.b1 = self.b1 - self.gradb1 * self.alpha

    def train(self, x, y, iterations=1000, alpha=0.2, beta=0.5):
        self.alpha = alpha
        self.beta = beta
        for i in range(iterations):
            self.weightUpdate(x, y)  # full-batch update
            # Monitoring values: squared error plus weight magnitudes, and the
            # squared error alone.
            error = np.sum((y - self.a3) ** 2) + np.sum(self.w2 ** 2) + np.sum(self.w1 ** 2)
            error1 = np.sum((y - self.a3) ** 2)
            if error < 0.001:
                break
            if i % 100 == 0:
                print('error ', error)
                print('error1', error1)

    def predict(self, x):
        self.feedforward(x)
        print(self.a3)

    def dcheck(self, w, x, y):
        """Numeric gradient of E = 0.5 * sum((y - a3)^2) by central differences.

        The 1/2 factor makes the result comparable to the analytic deltas,
        which use d3 = -(y - a3) * f'(z3).
        """
        ww = np.copy(w)
        for i in range(w.shape[0]):
            for j in range(w.shape[1]):
                wij = w[i, j]
                w[i, j] = wij + 0.0001
                self.feedforward(x)
                you = 0.5 * np.sum((y - self.a3) ** 2)  # loss at w + h
                w[i, j] = wij - 0.0001
                self.feedforward(x)
                zuo = 0.5 * np.sum((y - self.a3) ** 2)  # loss at w - h
                w[i, j] = wij  # restore the original weight
                ww[i, j] = (you - zuo) / 0.0002
        return ww


def test():
    # 4-3-4 network trained to reproduce its own input.
    bpnn = Bpnn(4, 3, 4)
    bpnn.train(testData, testData, 2000, 0.2, 0.2)
    print('testData', testData)
    bpnn.predict(testData)


if __name__ == '__main__':
    test()
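For reference, the inline comments in backPropagate and weightUpdate abbreviate the standard back-propagation equations for a squared-error loss with L2 weight decay. Written out (using sigma'(z) = a(1 - a) since a = sigma(z)):

\[
E = \tfrac{1}{2}\lVert y - a_3 \rVert^2 + \tfrac{\beta}{2}\left(\lVert W_1 \rVert^2 + \lVert W_2 \rVert^2\right)
\]
\[
\delta_3 = -(y - a_3) \odot a_3(1 - a_3), \qquad
\delta_2 = \left(W_2^{\mathsf{T}} \delta_3\right) \odot a_2(1 - a_2)
\]
\[
\nabla_{W_2} E = \delta_3\, a_2^{\mathsf{T}} + \beta W_2, \quad
\nabla_{b_2} E = \delta_3, \quad
\nabla_{W_1} E = \delta_2\, a_1^{\mathsf{T}} + \beta W_1, \quad
\nabla_{b_1} E = \delta_2
\]
\[
W \leftarrow W - \alpha \nabla_W E, \qquad
b \leftarrow b - \alpha \nabla_b E
\]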
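The dcheck method is only invoked from the commented-out lines in weightUpdate. Here is a minimal sketch of how it might be used to verify the analytic gradients numerically; it assumes it is appended to the script above, and the names numeric and analytic are illustrative, not from the original post. Note that dcheck measures only the data term of the loss, so the comparison deliberately leaves out the beta * w2 weight-decay term:

# Gradient check on w2 for a single training column (hypothetical usage sketch).
bpnn = Bpnn(4, 3, 4)
x = testData[:, 0:1]  # one sample column
y = x                 # autoencoder target: reconstruct the input

bpnn.feedforward(x)
bpnn.backPropagate(y)
analytic = np.dot(bpnn.d3, bpnn.a2.T)  # data term of gradw2, without beta * w2
numeric = bpnn.dcheck(bpnn.w2, x, y)   # central differences on 0.5 * ||y - a3||^2
print('max abs diff:', np.abs(numeric - analytic).max())  # expect ~1e-8 or smaller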
I taught myself Python, so the syntax and code quality are rough. Comments and suggestions are very welcome.
