A Rough Understanding of RNNs


A good resource for learning RNNs: [link]

 

1. What is an RNN

Put simply, an RNN (recurrent neural network) is a network in which the previous timestep's output is fed back in as part of the current timestep's input: the hidden state computed at step t-1 is combined with the input at step t to produce the hidden state at step t.
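A minimal sketch of one recurrent step (the names rnn_step, W_xh, and W_hh are illustrative, not taken from the code below):

import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def rnn_step(x_t, h_prev, W_xh, W_hh):
    # the new hidden state depends on the current input AND the previous hidden state
    return sigmoid(x_t @ W_xh + h_prev @ W_hh)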

2. When should you use an RNN

a. You start with a collection of samples.

b. Some of those samples depend on each other; for example, sample a's label depends not only on a's own features but also on the output produced for sample b.

c. The related samples can be grouped into clusters: within a cluster, samples depend on one another; across clusters, there are no dependencies. Each cluster is, in effect, one sequence (see the sketch after this list).
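One way to picture condition c (an illustrative sketch; the array and its shape are assumptions, not part of the code below):

import numpy as np

# 100 independent clusters (sequences); the 8 ordered steps inside each cluster
# depend on one another, while different clusters share no dependencies
data = np.zeros((100, 8, 2))  # (clusters, steps per cluster, features per step)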

3. A simple example

Use a neural network to compute the sum of two integers a and b, where a and b are each represented as an 8-bit binary number. In the terms of section 2, each addition problem is one cluster: its eight bit positions depend on one another through the carry, while different addition problems are independent.
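A quick worked instance of why the network needs memory (the specific numbers are only an illustration): adding bit by bit from the least-significant end, the output bit at one position can depend on the carry produced at an earlier position.

a, b = 9, 5  # 00001001 + 00000101 = 00001110 (14)
print(format(a, '08b'), format(b, '08b'), format(a + b, '08b'))
# at bit 0 (least significant), 1 + 1 = 0 carry 1; that carry flips bit 1
# of the sum, so the network must carry state across bit positions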


4. Network structure

At each timestep the network takes a 2-dimensional input (one bit of a and one bit of b), feeds it into a 16-unit hidden layer that also receives the previous timestep's hidden state through a recurrent weight matrix, and produces a 1-dimensional output (the corresponding bit of the sum).
5. Code
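The listing below trains on randomly generated addition problems. For each problem it makes a forward pass over the eight bit positions from least significant to most significant, storing every hidden state, then a backward pass over the positions in reverse (backpropagation through time) that accumulates weight updates, which are applied once per problem.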

# -*- coding: utf-8 -*-
import copy
import numpy as np

np.random.seed(0)

# compute sigmoid nonlinearity
def sigmoid(x):
    output = 1 / (1 + np.exp(-x))
    return output

# convert output of sigmoid function to its derivative
def sigmoid_output_to_derivative(output):
    return output * (1 - output)

# training dataset generation: map each integer 0..255 to its 8-bit binary form
int2binary = {}
binary_dim = 8

largest_number = pow(2, binary_dim)  # 256
binary = np.unpackbits(
    np.arange(largest_number, dtype=np.uint8).reshape(-1, 1), axis=1)
for i in range(largest_number):
    int2binary[i] = binary[i]

# input variables
alpha = 0.1       # learning rate
input_dim = 2     # one bit of a and one bit of b per timestep
hidden_dim = 16
output_dim = 1    # one bit of the sum per timestep

# initialize neural network weights in [-1, 1)
synapse_0 = 2 * np.random.random((input_dim, hidden_dim)) - 1    # input -> hidden
synapse_1 = 2 * np.random.random((hidden_dim, output_dim)) - 1   # hidden -> output
synapse_h = 2 * np.random.random((hidden_dim, hidden_dim)) - 1   # hidden -> hidden (recurrent)

synapse_0_update = np.zeros_like(synapse_0)
synapse_1_update = np.zeros_like(synapse_1)
synapse_h_update = np.zeros_like(synapse_h)

# training logic
for j in range(10000):

    # generate a simple addition problem (a + b = c)
    a_int = np.random.randint(largest_number // 2)  # a_int is an integer in [0, 128)
    a = int2binary[a_int]

    b_int = np.random.randint(largest_number // 2)  # b_int is an integer in [0, 128)
    b = int2binary[b_int]

    # true answer
    c_int = a_int + b_int
    c = int2binary[c_int]

    # where we'll store our best guess (binary encoded)
    d = np.zeros_like(c)

    overallError = 0

    layer_2_deltas = list()
    layer_1_values = list()
    layer_1_values.append(np.zeros(hidden_dim))

    # forward pass: move along the positions in the binary encoding,
    # least-significant bit first
    for position in range(binary_dim):

        # generate input and output
        X = np.array([[a[binary_dim - position - 1], b[binary_dim - position - 1]]])
        y = np.array([[c[binary_dim - position - 1]]]).T

        # hidden layer (input + prev_hidden)
        layer_1 = sigmoid(np.dot(X, synapse_0) + np.dot(layer_1_values[-1], synapse_h))

        # output layer (new binary representation)
        layer_2 = sigmoid(np.dot(layer_1, synapse_1))

        # did we miss? ... if so, by how much?
        layer_2_error = y - layer_2
        layer_2_deltas.append(layer_2_error * sigmoid_output_to_derivative(layer_2))
        overallError += np.abs(layer_2_error[0])

        # decode our estimate so we can print it out
        d[binary_dim - position - 1] = np.round(layer_2[0][0])

        # store hidden layer so we can use it in the next timestep
        layer_1_values.append(copy.deepcopy(layer_1))

    future_layer_1_delta = np.zeros(hidden_dim)

    # backward pass: backpropagation through time, walking the positions in reverse
    for position in range(binary_dim):

        X = np.array([[a[position], b[position]]])
        layer_1 = layer_1_values[-position - 1]
        prev_layer_1 = layer_1_values[-position - 2]

        # error at output layer
        layer_2_delta = layer_2_deltas[-position - 1]

        # error at hidden layer: error from the future hidden state plus error
        # from the current output
        layer_1_delta = (future_layer_1_delta.dot(synapse_h.T) +
                         layer_2_delta.dot(synapse_1.T)) * sigmoid_output_to_derivative(layer_1)

        # accumulate weight updates so we can apply them after the full pass
        synapse_1_update += np.atleast_2d(layer_1).T.dot(layer_2_delta)
        synapse_h_update += np.atleast_2d(prev_layer_1).T.dot(layer_1_delta)
        synapse_0_update += X.T.dot(layer_1_delta)

        future_layer_1_delta = layer_1_delta

    synapse_0 += synapse_0_update * alpha
    synapse_1 += synapse_1_update * alpha
    synapse_h += synapse_h_update * alpha

    synapse_0_update *= 0
    synapse_1_update *= 0
    synapse_h_update *= 0

    # print out progress
    if j % 1000 == 0:
        print("Error:" + str(overallError))
        print("Pred:" + str(d))
        print("True:" + str(c))
        out = 0
        for index, x in enumerate(reversed(d)):
            out += int(x) * pow(2, index)
        print(str(a_int) + " + " + str(b_int) + " = " + str(out))
        print("____________")

 

 
