Machine Learning Notes: RBF Networks


RBF.py

# coding=utf8
import math
import random
import numpy as np
import time
import matplotlib.pyplot as plt

random.seed(0)

'''
    Some thoughts on BP vs. RBF:
    BP networks have a black-box character; BP is based on interpolation and function fitting.
    An RBF (radial basis function) is a kernel used to approximate functions.
    An RBF unit only responds within its receptive range, so it lacks the properties
    BP has under certain structures (e.g. linear ones).
    An RBF network is essentially a purely data-driven structure, similar to KNN:
    interpolation with radial basis functions replaces a hand-constructed interpolant.
    The initial centers are therefore very important; they are usually obtained
    with K-means clustering (a sketch of that initialization follows after this file).
'''


def rand(a, b):
    return (b - a) * random.random() + a
# Random centers are not recommended; determine them by clustering instead.


class Layer:
    def __init__(self, typ, prev_num, num, Plist):
        self.typ = typ
        self.prev_num = prev_num
        self.num = num
        self.Nodes = []
        self.Plist = Plist
        self.__createNodes()

    def __createNodes(self):
        for i in range(self.num):
            self.Nodes.append(Node(self.typ, i, self.prev_num, self.Plist))


class Node:
    def __init__(self, typ, index, prev_num, Plist):
        self.typ = typ
        self.index = index
        self.prev_num = prev_num
        self.Plist = Plist
        self.__initNode()

    def __initNode(self):
        # Input layer: each neuron is wired to exactly one input, so a single weight.
        if self.typ == 'input':
            self.w = 1
        elif self.typ == 'mm':          # hidden (RBF) layer
            self.tz = 1                 # Gaussian width sigma
            self.center = rand(0, 3)    # placeholder; K-means centers are preferable
        elif self.typ == 'output':
            self.w = [rand(-1, 1) for i in range(self.prev_num)]

    def __cal(self, data):
        inpu = 0
        if self.typ == 'input':
            inpu = data[self.index]
        elif self.typ == 'output':
            for i in range(self.prev_num):
                inpu += self.w[i] * data[i]
        else:
            # Gaussian RBF; the 2*sigma^2 form matches the gradients used in Net.train().
            r = abs(self.center - data[0])
            inpu = math.exp(-1.0 * (r ** 2) / (2 * self.tz ** 2))
        return inpu

    def calValue(self, data):
        self.inpu = self.__cal(data)
        self.out = self.inpu
        return self.out


class Net:
    def __init__(self):
        self.Layers = []

    def setData(self, trainData):
        self.trainData = trainData

    def setNodes(self, NodesNum):
        prev_num = len(self.trainData[0][0])
        layer = Layer('input', prev_num, prev_num, None)
        self.Layers.append(layer)
        self.NodesNum = NodesNum
        layer = Layer('mm', prev_num, self.NodesNum, None)
        self.Layers.append(layer)
        num = len(self.trainData[0][1])
        layer = Layer('output', self.NodesNum, num, None)
        self.Layers.append(layer)

    def train(self, learn_rate, maxIter, epoch):
        # Split the training data into inputs and targets.
        self.lr = learn_rate
        DATA = [self.trainData[i][0] for i in range(len(self.trainData))]
        RESULT = [self.trainData[i][1] for i in range(len(self.trainData))]
        for k in range(maxIter):
            # Forward pass over every sample to collect the errors,
            # then update every hidden node.
            error = [[0 for i in range(len(RESULT[0]))] for i in range(len(DATA))]
            for i in range(len(DATA)):
                dError = [0 for p in range(len(RESULT[0]))]
                prev_data = DATA[i]
                for layer in self.Layers:
                    value = []
                    for node in layer.Nodes:
                        value.append(node.calValue(prev_data))
                    prev_data = value
                result = prev_data
                for j in range(len(RESULT[0])):
                    dError[j] = RESULT[i][j] - result[j]
                error[i] = dError
                if k % epoch == 0:
                    print(RESULT[i][j], result, dError)
            for q in range(self.NodesNum):
                sum1 = 0.0
                sum2 = 0.0
                sum3 = 0.0
                center = self.Layers[1].Nodes[q].center
                tz = self.Layers[1].Nodes[q].tz
                for m in range(len(DATA)):
                    X = DATA[m][0]
                    g = math.exp(-1.0 * (X - center) ** 2 / (2 * (tz ** 2)))
                    sum1 += error[m][0] * g * (X - center)
                    sum2 += error[m][0] * g * (X - center) ** 2
                    sum3 += error[m][0] * g
                # Gradient-descent updates; only the first output node is handled,
                # i.e. the network is assumed to have a single output.
                weight = self.Layers[2].Nodes[0].w[q]
                delta_center = self.lr * weight / (tz ** 2) * sum1
                delta_tz = self.lr * weight / pow(tz, 3) * sum2
                delta_weight = self.lr * sum3
                self.Layers[1].Nodes[q].center += delta_center
                self.Layers[1].Nodes[q].tz += delta_tz
                self.Layers[2].Nodes[0].w[q] += delta_weight

    def test(self, testDATA):
        TestDATA = [testDATA[i][0] for i in range(len(testDATA))]
        TestRESULT = [testDATA[i][1] for i in range(len(testDATA))]
        predicts = []
        Error = [0 for i in range(len(TestRESULT[0]))]
        for k in range(len(TestDATA)):
            result = self.predict(TestDATA[k])
            predicts.append(result)
            for q in range(len(Error)):
                # Running mean absolute error over the samples seen so far.
                Error[q] = (Error[q] * k + abs(1.0 * (TestRESULT[k][q] - result[q]))) / (k + 1)
                print(TestRESULT[k][q], result[q], Error)
        print(Error)
        self.__plotTest(TestRESULT, predicts)
        return Error

    def __plotTest(self, TestRESULT, predicts):
        Ax = [i for i in range(len(TestRESULT))]
        plt.plot(Ax, TestRESULT, label='true')
        plt.plot(Ax, predicts, label='predict')
        plt.legend(loc='upper left')
        plt.show()

    def predict(self, inpu):
        prev_data = inpu
        for layer in self.Layers:
            value = []
            for node in layer.Nodes:
                value.append(node.calValue(prev_data))
            prev_data = value
        result = prev_data
        return result
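
For reference, the updates computed in Net.train() are one step of gradient descent on the squared error of a single output y(x) = Σ_q w_q φ_q(x). Writing e_m = d_m − y(x_m) for sample m, the sums sum3, sum1 and sum2 in the code correspond to the formulas below (a reconstruction of what the code implements, with learning rate η and width tz = σ_q):

\[
\varphi_q(x) = \exp\!\left(-\frac{(x - c_q)^2}{2\sigma_q^2}\right),\qquad
\Delta w_q = \eta \sum_m e_m\,\varphi_q(x_m),\qquad
\Delta c_q = \frac{\eta\, w_q}{\sigma_q^2} \sum_m e_m\,\varphi_q(x_m)\,(x_m - c_q),\qquad
\Delta \sigma_q = \frac{\eta\, w_q}{\sigma_q^3} \sum_m e_m\,\varphi_q(x_m)\,(x_m - c_q)^2.
\]

The docstring above also recommends initializing the centers with K-means clustering rather than rand(0,3). Below is a minimal sketch of that initialization, assuming scikit-learn is available (KMeans and cluster_centers_ are from sklearn.cluster; assigning node.center directly is a shortcut for illustration, not a method of the Net class):

import numpy as np
from sklearn.cluster import KMeans

def kmeans_centers(trainData, n_hidden):
    """Cluster the 1-D training inputs and return n_hidden center values."""
    X = np.array([sample[0] for sample in trainData])   # shape (n_samples, 1)
    km = KMeans(n_clusters=n_hidden, n_init=10, random_state=0).fit(X)
    return km.cluster_centers_.ravel().tolist()

# Usage: overwrite the random centers of the hidden ('mm') layer before training.
# net1 = Net(); net1.setData(trainData); net1.setNodes(8)
# for node, c in zip(net1.Layers[1].Nodes, kmeans_centers(trainData, 8)):
#     node.center = c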

test.py

# coding=utf8
import time
import math
import numpy as np
from RBF import *   # the module above is RBF.py

# Training data: y = 2x sampled on [1, 2].
trainData = [
    [[1],   [2]],
    [[1.1], [2.2]],
    [[1.3], [2.6]],
    [[1.4], [2.8]],
    [[1.6], [3.2]],
    [[1.7], [3.4]],
    [[1.8], [3.6]],
    [[1.9], [3.8]],
    [[2],   [4]],
]


def generate_data(n):
    """Generate n samples (u, y) of a nonlinear test function."""
    u = np.random.uniform(-5, 5, n)
    y = []
    for i in np.arange(0, n):
        y.append(u[i] * math.sin(u[i]) / 5.0 + u[i] ** 2 / 30 - 0.5)
    return u, y


t0 = time.perf_counter()   # time.clock() was removed in Python 3.8
net1 = Net()
net1.setData(trainData)
net1.setNodes(8)
net1.train(0.008, 20000, 1000)
print(net1.predict([1.65]))
net1.test(trainData)
print("elapsed time:", time.perf_counter() - t0, "s")
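
generate_data is defined above but never called in the script. A short sketch of how it could drive the same network follows; the names net2/synthData and the hyperparameters (200 samples, 10 hidden nodes, learning rate 0.005, 5000 iterations) are arbitrary illustration choices, not values from the original script:

# Pack the synthetic (u, y) samples into the [[x], [y]] pairs that Net.setData expects.
u, y = generate_data(200)
synthData = [[[u[i]], [y[i]]] for i in range(len(u))]

net2 = Net()
net2.setData(synthData)
net2.setNodes(10)              # arbitrary hidden-node count
net2.train(0.005, 5000, 500)   # arbitrary learning rate / iterations / print interval
net2.test(synthData)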