神经网络简单代码示例

来源:互联网 发布:mac电脑怎么卸载软件 编辑:程序博客网 时间:2024/05/29 11:46

Python 3.6.1 — 神经网络的简单 Python 实现示例,帮助理解前向传播与反向传播的基本原理。

"""Minimal neural-network tutorial script.

Trains two models by full-batch gradient descent with sigmoid
activations:

* a single-layer (3 -> 1) perceptron, and
* a two-layer (3 -> 5 -> 1) network with backpropagation.

The first dataset (``X``/``y``) is linearly separable (the label equals
the first input column), so the single-layer model fits it.  The second
(``X1``/``y1``) is XOR-like (label = x0 XOR x1) and is NOT linearly
separable — the single-layer model fails on it while the two-layer
model succeeds.

NOTE(review): the original script also imported ``matplotlib.pyplot``
but never used it, and its "3x5x1" section silently rebound ``X``/``y``
to the same XOR data as ``X1``/``y1``; both quirks are removed here.
"""

import numpy as np


def sigfun(x):
    """Sigmoid activation: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-x))


def dsigfun(a):
    """Sigmoid derivative expressed in terms of the sigmoid's OUTPUT.

    For f(x) = 1 / (1 + e^-x), f'(x) = f(x) * (1 - f(x)); the caller
    passes a = f(x), so no extra exponential is needed.
    """
    return a * (1 - a)


def train_single_layer(X, y, iters=10000, seed=None):
    """Train a single-layer sigmoid network by full-batch gradient descent.

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
        Input patterns (the tutorial uses 4x3 binary rows).
    y : ndarray, shape (n_samples, 1)
        Target outputs in [0, 1].
    iters : int
        Number of full-batch update steps.
    seed : int or None
        If given, seeds NumPy's RNG for reproducible weights.

    Returns
    -------
    (w0, a1) : learned (n_features, 1) weight matrix and the final
        network output for ``X``.
    """
    if seed is not None:
        np.random.seed(seed)
    # Initialize weights uniformly in [-1, 1) — mean 0.
    w0 = 2 * np.random.random((X.shape[1], 1)) - 1
    a1 = sigfun(np.dot(X, w0))
    for _ in range(iters):
        a0 = X                           # layer 0 is the input itself
        a1 = sigfun(np.dot(a0, w0))      # forward pass
        a1_error = y - a1                # how much did we miss?
        # Scale the error by the sigmoid slope at a1 so saturated
        # ("confident") units receive smaller updates.
        a1_delta = a1_error * dsigfun(a1)
        w0 += np.dot(a0.T, a1_delta)
    return w0, a1


def train_two_layer(X, y, hidden=5, iters=60000, seed=1, report_every=10000):
    """Train a two-layer sigmoid network with backpropagation.

    Parameters
    ----------
    X, y : input patterns and (n_samples, 1) targets, as above.
    hidden : int
        Width of the hidden layer (original demo uses 5).
    iters : int
        Number of full-batch update steps.
    seed : int
        Seed for reproducible weight initialization.
    report_every : int
        Print the mean absolute error every this many steps;
        pass 0 to silence reporting.

    Returns
    -------
    (w0, w1, a2) : learned weight matrices and the final output.
    """
    np.random.seed(seed)
    # Both weight layers initialized uniformly in [-1, 1).
    w0 = 2 * np.random.random((X.shape[1], hidden)) - 1
    w1 = 2 * np.random.random((hidden, 1)) - 1
    a2 = sigfun(np.dot(sigfun(np.dot(X, w0)), w1))
    for j in range(iters):
        # Feed forward through layers 0, 1 and 2.
        a0 = X
        a1 = sigfun(np.dot(a0, w0))
        a2 = sigfun(np.dot(a1, w1))
        a2_error = y - a2
        if report_every and j % report_every == 0:
            print("Error:" + str(np.mean(np.abs(a2_error))))
        # Output-layer gradient, scaled by sigmoid slope.
        a2_delta = a2_error * dsigfun(a2)
        # Distribute the output error back to the hidden layer in
        # proportion to the connecting weights (backpropagation).
        a1_error = a2_delta.dot(w1.T)
        a1_delta = a1_error * dsigfun(a1)
        w1 += a1.T.dot(a2_delta)
        w0 += a0.T.dot(a1_delta)
    return w0, w1, a2


# Linearly separable dataset: the label is simply the first column.
X = np.array([[0, 0, 1],
              [1, 1, 1],
              [1, 0, 1],
              [0, 1, 1]])
y = np.array([[0, 1, 1, 0]]).T

# XOR-like dataset: label = x0 XOR x1.  Not linearly separable, so only
# the two-layer model can fit it.
X1 = np.array([[0, 0, 1],
               [0, 1, 1],
               [1, 0, 1],
               [1, 1, 1]])
y1 = np.array([[0, 1, 1, 0]]).T


if __name__ == "__main__":
    # --- Single-layer model: succeeds on the separable data ... ---
    w0, out = train_single_layer(X, y)
    print("Output After Training:")
    print(out)

    # --- ... but cannot learn XOR. ---
    w0, out = train_single_layer(X1, y1)
    print("Output After Training:")
    print(out)
    print("w0 After Training:")
    print(w0)

    # --- Two-layer 3x5x1 model: learns XOR. ---
    _, _, out = train_two_layer(X1, y1)
    print(out)

1![](http://img.blog.csdn.net/20170912202552218?watermark/2/text/aHR0cDovL2Jsb2cuY3Nkbi5uZXQvcXFfMzk1MzM2MDg=/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70/gravity/SouthEast)

原创粉丝点击