Annotations on Hinton's Original Deep Learning Code





Original source: http://www.cnblogs.com/BeDPS/archive/2013/07/10/3182725.html


The MATLAB example code comes in two parts, corresponding to two different papers:

1. Reducing the Dimensionality of Data with Neural Networks

  mnistdeepauto.m   backprop.m   rbmhidlinear.m

2. A Fast Learning Algorithm for Deep Belief Nets

  mnistclassify.m   backpropclassify.m

The remaining files are shared by both; a rough call chain of the classification pipeline is sketched below.
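For orientation, the call chain of the classification pipeline, as it can be read off the listings that follow (the annotator's summary, not part of the original distribution):

mnistclassify.m                          entry point
    converter                            convert raw MNIST files into MATLAB format
    makebatches                          build batchdata / batchtargets mini-batches
    rbm (called three times)             greedy layer-wise pretraining: 784-500-500-2000
    backpropclassify                     discriminative fine-tuning
        minimize(...,'CG_CLASSIFY_INIT',...)   top layer only (epochs 1-5)
        minimize(...,'CG_CLASSIFY',...)        all layers jointly (epoch 6 onward)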

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
mnistclassify.m
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

clear all
close all

maxepoch=50; %number of pretraining epochs
numhid=500; numpen=500; numpen2=2000; %sizes of the three hidden layers

fprintf(1,'Converting Raw files into Matlab format \n');
converter;

fprintf(1,'Pretraining a deep autoencoder. \n');
fprintf(1,'The Science paper used 50 epochs. This uses %3i \n', maxepoch);

makebatches;%split the data into mini-batches
[numcases numdims numbatches]=size(batchdata); %get the dimensions of batchdata
%%numcases   number of cases per batch
%%numdims    dimensionality of each data vector
%%numbatches number of batches
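%%(Annotator's note) For MNIST with the accompanying makebatches.m these are
%%typically 100 x 784 x 600: 600 training mini-batches of 100 images, each a
%%784-dimensional (28x28) pixel vector scaled to [0,1].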

fprintf(1,'Pretraining Layer 1 with RBM: %d-%d \n',numdims,numhid);%image input layer to the first hidden layer
restart=1; %tell rbm.m to (re)initialize its weights and biases
rbm; %run RBM training (rbm.m is a script: it reads batchdata, numhid, maxepoch and restart from the workspace)
hidrecbiases=hidbiases; %keep the hidden-layer biases
save mnistvhclassify vishid hidrecbiases visbiases; %save the layer-1 weights and biases

fprintf(1,'\nPretraining Layer 2 with RBM: %d-%d \n',numhid,numpen);%first hidden layer to second hidden layer
batchdata=batchposhidprobs; %the previous RBM's hidden-layer outputs become this RBM's input
numhid=numpen;%set the hidden-layer size; the visible size is implied by the data just read in
restart=1;
rbm;
hidpen=vishid; penrecbiases=hidbiases; hidgenbiases=visbiases; %as above: keep the weights and biases
save mnisthpclassify hidpen penrecbiases hidgenbiases;

fprintf(1,'\nPretraining Layer 3 with RBM: %d-%d \n',numpen,numpen2);%second hidden layer to third hidden layer, otherwise as above
batchdata=batchposhidprobs;
numhid=numpen2;
restart=1;
rbm;
hidpen2=vishid; penrecbiases2=hidbiases; hidgenbiases2=visbiases;
save mnisthp2classify hidpen2 penrecbiases2 hidgenbiases2;

backpropclassify;

 


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
backpropclassify.m
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
maxepoch=200;
fprintf(1,'\nTraining discriminative model on MNIST by minimizing cross entropy error. \n');%minimize the cross-entropy error
fprintf(1,'60 batches of 1000 cases each. \n');

load mnistvhclassify%load the pretrained weights and biases of each layer
load mnisthpclassify
load mnisthp2classify

makebatches;%split the data into mini-batches
[numcases numdims numbatches]=size(batchdata);
N=numcases; %number of cases per batch

%%%% PREINITIALIZE WEIGHTS OF THE DISCRIMINATIVE MODEL%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

w1=[vishid; hidrecbiases];%layer-1-to-2 weights with the hidden biases appended as the last row
w2=[hidpen; penrecbiases];%same as above
w3=[hidpen2; penrecbiases2];%same as above
w_class = 0.1*randn(size(w3,2)+1,10);%random top-layer weights: (units in layer 4 + 1 bias row) x 10

%%%%%%%%%% END OF PREINITIALIZATION OF WEIGHTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

l1=size(w1,1)-1;%number of units in each layer (excluding the bias row)
l2=size(w2,1)-1;
l3=size(w3,1)-1;
l4=size(w_class,1)-1;%units in the top hidden layer
l5=10; %units in the label layer
test_err=[];
train_err=[];


for epoch = 1:maxepoch

%%%%%%%%%%%%%%%%%%%% COMPUTE TRAINING MISCLASSIFICATION ERROR %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
err=0; 
err_cr=0;
counter=0;
[numcases numdims numbatches]=size(batchdata);
%%numcases   number of cases per batch
%%numdims    dimensionality of each data vector
%%numbatches number of batches
N=numcases;%%cases per batch
for batch = 1:numbatches
data = [batchdata(:,:,batch)];%read one batch of data
target = [batchtargets(:,:,batch)];%read the corresponding target labels
data = [data ones(N,1)];%append a column of ones for the bias
w1probs = 1./(1 + exp(-data*w1)); w1probs = [w1probs ones(N,1)];%sigmoid activations of each layer, as in standard backprop
w2probs = 1./(1 + exp(-w1probs*w2)); w2probs = [w2probs ones(N,1)];
w3probs = 1./(1 + exp(-w2probs*w3)); w3probs = [w3probs ones(N,1)];

targetout = exp(w3probs*w_class);%unnormalized outputs: an N x 10 matrix
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Processing of the label-layer output (see Eq. 6.1 in the annotator's source):
%w3probs*w_class is the input to the label layer. Exactly one unit should be
%active; it is chosen via the probabilities computed below, a "softmax" group
%of 10 units.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
targetout = targetout./repmat(sum(targetout,2),1,10);%normalize each of the 10 label outputs by the row sum
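%(Annotator's note) The two lines above implement a row-wise softmax,
%  targetout(n,j) = exp(x(n,j)) / sum_k exp(x(n,k)),  with x = w3probs*w_class.
%For large logits exp() can overflow; a common stabilization (not in the
%original code) subtracts the row maximum first:
%  x = w3probs*w_class;  x = x - repmat(max(x,[],2),1,10);
%  targetout = exp(x);   targetout = targetout./repmat(sum(targetout,2),1,10);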

[I J]=max(targetout,[],2);%row-wise maximum of the predictions and its column index (the predicted class)
[I1 J1]=max(target,[],2);%column index of the 1 in each one-hot target row (the true class)
counter=counter+length(find(J==J1));%count the correctly classified cases
err_cr = err_cr- sum(sum( target(:,1:end).*log(targetout))) ; %accumulate the cross-entropy -sum(t.*log(p))
end
train_err(epoch)=(numcases*numbatches-counter);%total number of misclassified training cases
train_crerr(epoch)=err_cr/numbatches;%average cross-entropy per batch (a loss, not an error rate)

%%%%%%%%%%%%%% END OF COMPUTING TRAINING MISCLASSIFICATION ERROR %%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%%%%%%%%%%%%%%%%%%% COMPUTE TEST MISCLASSIFICATION ERROR %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
err=0;
err_cr=0;
counter=0;
[testnumcases testnumdims testnumbatches]=size(testbatchdata);

N=testnumcases;
for batch = 1:testnumbatches
data = [testbatchdata(:,:,batch)];
target = [testbatchtargets(:,:,batch)];
data = [data ones(N,1)];
w1probs = 1./(1 + exp(-data*w1)); w1probs = [w1probs ones(N,1)];
w2probs = 1./(1 + exp(-w1probs*w2)); w2probs = [w2probs ones(N,1)];
w3probs = 1./(1 + exp(-w2probs*w3)); w3probs = [w3probs ones(N,1)];
targetout = exp(w3probs*w_class);
targetout = targetout./repmat(sum(targetout,2),1,10);

[I J]=max(targetout,[],2);
[I1 J1]=max(target,[],2);
counter=counter+length(find(J==J1));
err_cr = err_cr- sum(sum( target(:,1:end).*log(targetout))) ;
end
test_err(epoch)=(testnumcases*testnumbatches-counter);
test_crerr(epoch)=err_cr/testnumbatches;
fprintf(1,'Before epoch %d Train # misclassified: %d (from %d). Test # misclassified: %d (from %d) \t \t \n',...
epoch,train_err(epoch),numcases*numbatches,test_err(epoch),testnumcases*testnumbatches);

%%%%%%%%%%%%%% END OF COMPUTING TEST MISCLASSIFICATION ERROR %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

tt=0; 
for batch = 1:numbatches/10
fprintf(1,'epoch %d batch %d\r',epoch,batch);

%%%%%%%%%%% COMBINE 10 MINIBATCHES INTO 1 LARGER MINIBATCH %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%combine 10 mini-batches into one 1000-case batch, then fine-tune with conjugate gradients
tt=tt+1; 
data=[];
targets=[]; 
for kk=1:10
data=[data; batchdata(:,:,(tt-1)*10+kk)]; %stack 10 mini-batches vertically
targets=[targets; batchtargets(:,:,(tt-1)*10+kk)];
end

%%%%%%%%%%%%%%% PERFORM CONJUGATE GRADIENT WITH 3 LINESEARCHES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
max_iter=3; %number of line searches per conjugate-gradient call

if epoch<6 % First update top-level weights holding other weights fixed. 
N = size(data,1); %number of cases in the combined batch
XX = [data ones(N,1)]; %append a 1 to each row for the bias
w1probs = 1./(1 + exp(-XX*w1)); w1probs = [w1probs ones(N,1)];
w2probs = 1./(1 + exp(-w1probs*w2)); w2probs = [w2probs ones(N,1)];
w3probs = 1./(1 + exp(-w2probs*w3)); %w3probs = [w3probs ones(N,1)];

VV = [w_class(:)']'; %unroll w_class into a single column vector, as minimize's interface requires
Dim = [l4; l5]; %sizes of the top two layers: the 2000-unit hidden layer and the 10-unit label layer
[X, fX] = minimize(VV,'CG_CLASSIFY_INIT',max_iter,Dim,w3probs,targets);%train only the top layer; see the function definition below
%minimize is Carl Rasmussen's "minimize" code
%%------------------ argument meanings ------------------%%
%VV        the unrolled weight vector; must be a column (D by 1)
%X         the optimized parameters of f = 'CG_CLASSIFY_INIT'
%fX        the function values encountered during the optimization
%max_iter  if positive, the maximum number of line searches; if negative, the maximum number of function evaluations
%%-------------------------------------------------%
w_class = reshape(X,l4+1,l5);%restore the weight matrix shape

else %after epoch 5: fine-tune all the weights jointly
VV = [w1(:)' w2(:)' w3(:)' w_class(:)']'; %unroll all weight matrices into one column vector
Dim = [l1; l2; l3; l4; l5]; %pass in the layer sizes
[X, fX] = minimize(VV,'CG_CLASSIFY',max_iter,Dim,data,targets);

w1 = reshape(X(1:(l1+1)*l2),l1+1,l2); %unpack w1
xxx = (l1+1)*l2; %running offset into the flat vector
w2 = reshape(X(xxx+1:xxx+(l2+1)*l3),l2+1,l3);
xxx = xxx+(l2+1)*l3;
w3 = reshape(X(xxx+1:xxx+(l3+1)*l4),l3+1,l4);
xxx = xxx+(l3+1)*l4;
w_class = reshape(X(xxx+1:xxx+(l4+1)*l5),l4+1,l5);
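%(Annotator's note) Layout of the flat vector X being unpacked here:
%  [ w1(:); w2(:); w3(:); w_class(:) ]
%with lengths (l1+1)*l2, (l2+1)*l3, (l3+1)*l4 and (l4+1)*l5; the "+1"
%rows are the bias rows appended to each weight matrix.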

end
%%%%%%%%%%%%%%% END OF CONJUGATE GRADIENT WITH 3 LINESEARCHES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%

end

save mnistclassify_weights w1 w2 w3 w_class
save mnistclassify_error test_err test_crerr train_err train_crerr;

end

 


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
rbm.m
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
epsilonw = 0.1; % Learning rate for weights 
epsilonvb = 0.1; % Learning rate for biases of visible units 
epsilonhb = 0.1; % Learning rate for biases of hidden units 
weightcost = 0.0002; 
initialmomentum = 0.5;
finalmomentum = 0.9;

[numcases numdims numbatches]=size(batchdata);
%%numcases   number of cases per batch
%%numdims    dimensionality of each data vector
%%numbatches number of batches

if restart ==1,
restart=0;
epoch=1;

% Initializing symmetric weights and biases.
vishid = 0.1*randn(numdims, numhid); %initialize the visible-to-hidden weights with small Gaussian values
hidbiases = zeros(1,numhid);%hidden-unit biases
visbiases = zeros(1,numdims);%visible-unit biases

poshidprobs = zeros(numcases,numhid); %hidden-unit probabilities, positive phase
neghidprobs = zeros(numcases,numhid);%hidden-unit probabilities, negative phase
posprods = zeros(numdims,numhid);%positive-phase pairwise statistics <v_i h_j>
negprods = zeros(numdims,numhid);%negative-phase pairwise statistics <v_i h_j>
vishidinc = zeros(numdims,numhid);%weight-update increments between visible and hidden units
hidbiasinc = zeros(1,numhid);%hidden-bias increments
visbiasinc = zeros(1,numdims);%visible-bias increments
batchposhidprobs=zeros(numcases,numhid,numbatches);%stores the hidden probabilities of every batch, used as the next RBM's input
end

%%%%%%%%%%%%%%%% print epoch and batch progress %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
for epoch = epoch:maxepoch, %main training loop
fprintf(1,'epoch %d\r',epoch); 
errsum=0; %reset the reconstruction error for this epoch
for batch = 1:numbatches, %process one mini-batch at a time
fprintf(1,'epoch %d batch %d\r',epoch,batch);

%%%%%%%%% START POSITIVE PHASE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
data = batchdata(:,:,batch); %load this batch's data, v0
poshidprobs = 1./(1 + exp(-data*vishid - repmat(hidbiases,numcases,1))); %hidden probabilities p(h0=1|v0) via the sigmoid
batchposhidprobs(:,:,batch)=poshidprobs;%save the probabilities; after the final epoch they serve as input to the next layer's RBM
posprods = data' * poshidprobs;%positive statistics <v0 h0> for contrastive divergence

poshidact = sum(poshidprobs);%hidden activations summed over the batch (for the bias update)
posvisact = sum(data);%visible activations summed over the batch (for the bias update)

%%%%%%%%% END OF POSITIVE PHASE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
poshidstates = poshidprobs > rand(numcases,numhid);%Gibbs sampling: draw binary hidden states

%%%%%%%%% START NEGATIVE PHASE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
negdata = 1./(1 + exp(-poshidstates*vishid' - repmat(visbiases,numcases,1)));%reconstruct the visible layer v1 from the sampled h0
neghidprobs = 1./(1 + exp(-negdata*vishid - repmat(hidbiases,numcases,1))); %recompute the hidden probabilities h1 from v1
negprods = negdata'*neghidprobs;%negative statistics <v1 h1> for contrastive divergence

neghidact = sum(neghidprobs);
negvisact = sum(negdata);

%%%%%%%%% END OF NEGATIVE PHASE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
err= sum(sum( (data-negdata).^2 )); %squared reconstruction error for this batch
errsum = err + errsum;%accumulated over the epoch

if epoch>5, %switch to the larger momentum after epoch 5
momentum=finalmomentum;
else
momentum=initialmomentum;
end;

%%%%%%%%% UPDATE WEIGHTS AND BIASES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 
vishidinc = momentum*vishidinc + ...
epsilonw*( (posprods-negprods)/numcases - weightcost*vishid);%weight increment: CD-1 gradient estimate plus weight decay
visbiasinc = momentum*visbiasinc + (epsilonvb/numcases)*(posvisact-negvisact);%visible-bias increment
hidbiasinc = momentum*hidbiasinc + (epsilonhb/numcases)*(poshidact-neghidact);%hidden-bias increment
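%(Annotator's note) These three increments implement the CD-1 learning rule,
%  dW = momentum*dW + eps*( (<v h>_data - <v h>_recon)/numcases - weightcost*W )
%  db = momentum*db + (eps/numcases)*( act_data - act_recon )
%where the angle brackets are the batch-summed statistics computed above.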

vishid = vishid + vishidinc;
visbiases = visbiases + visbiasinc;
hidbiases = hidbiases + hidbiasinc;

%%%%%%%%%%%%%%%% END OF UPDATES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

end
fprintf(1, 'epoch %4i error %6.1f \n', epoch, errsum); 
end;

 


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
CG_CLASSIFY_INIT.M
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [f, df] = CG_CLASSIFY_INIT(VV,Dim,w3probs,target);%conjugate-gradient training of the top two layers only
l1 = Dim(1);
l2 = Dim(2);
N = size(w3probs,1); 
% Do deconversion: unpack the flat parameter vector.
w_class = reshape(VV,l1+1,l2); %restore the weight matrix shape
w3probs = [w3probs ones(N,1)]; %append a column of ones for the bias

targetout = exp(w3probs*w_class); %unnormalized label-layer outputs: an N x 10 matrix
targetout = targetout./repmat(sum(targetout,2),1,10); %softmax normalization, as in backpropclassify.m above
f = -sum(sum( target(:,1:end).*log(targetout))) ; %cross-entropy loss (only the t.*log(p) term is needed with a softmax output)

IO = (targetout-target(:,1:end)); %difference between the prediction and the target
Ix_class=IO; %error signal at the label layer
dw_class = w3probs'*Ix_class;%gradient w.r.t. w_class: for softmax plus cross-entropy the error signal is simply (output - target), with no extra sigmoid-derivative factor

df = [dw_class(:)']'; %unroll the gradient into a column vector, as minimize expects
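%(Annotator's derivation, for reference) With x = w3probs*w_class and
%p = softmax(x) applied row-wise, f = -sum_n sum_j t(n,j)*log p(n,j).
%Differentiating through the softmax gives df/dx = p - t, hence
%df/dw_class = w3probs' * (p - t), which is exactly dw_class above.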

 

 

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
CG_CLASSIFY.M
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This function fine-tunes all the weights jointly.
% Each step mirrors the annotations in CG_CLASSIFY_INIT.m.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function [f, df] = CG_CLASSIFY(VV,Dim,XX,target);
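The original listing stops at the signature. The body below is the annotator's reconstruction from the structure already seen in backpropclassify.m and CG_CLASSIFY_INIT.m (unpack all four weight matrices, run the forward pass, compute the cross-entropy, then backpropagate through the sigmoid layers); treat it as a sketch, not a verbatim copy of Hinton's file:

l1 = Dim(1); l2 = Dim(2); l3 = Dim(3); l4 = Dim(4); l5 = Dim(5);
N = size(XX,1);

% Unpack the flat parameter vector (same layout as in backpropclassify.m).
w1 = reshape(VV(1:(l1+1)*l2),l1+1,l2);
xxx = (l1+1)*l2;
w2 = reshape(VV(xxx+1:xxx+(l2+1)*l3),l2+1,l3);
xxx = xxx+(l2+1)*l3;
w3 = reshape(VV(xxx+1:xxx+(l3+1)*l4),l3+1,l4);
xxx = xxx+(l3+1)*l4;
w_class = reshape(VV(xxx+1:xxx+(l4+1)*l5),l4+1,l5);

% Forward pass, appending a bias column at every layer.
XX = [XX ones(N,1)];
w1probs = 1./(1 + exp(-XX*w1)); w1probs = [w1probs ones(N,1)];
w2probs = 1./(1 + exp(-w1probs*w2)); w2probs = [w2probs ones(N,1)];
w3probs = 1./(1 + exp(-w2probs*w3)); w3probs = [w3probs ones(N,1)];

targetout = exp(w3probs*w_class);
targetout = targetout./repmat(sum(targetout,2),1,10);
f = -sum(sum( target.*log(targetout) )); %cross-entropy, as in CG_CLASSIFY_INIT

% Backward pass: softmax/cross-entropy error at the top, then sigmoid
% derivatives, dropping the bias column of each error signal before
% propagating down a layer.
Ix_class = targetout - target;
dw_class = w3probs'*Ix_class;

Ix3 = (Ix_class*w_class').*w3probs.*(1-w3probs);
Ix3 = Ix3(:,1:end-1);
dw3 = w2probs'*Ix3;

Ix2 = (Ix3*w3').*w2probs.*(1-w2probs);
Ix2 = Ix2(:,1:end-1);
dw2 = w1probs'*Ix2;

Ix1 = (Ix2*w2').*w1probs.*(1-w1probs);
Ix1 = Ix1(:,1:end-1);
dw1 = XX'*Ix1;

df = [dw1(:)' dw2(:)' dw3(:)' dw_class(:)']';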


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
rbmhidlinear.m
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Identical to rbm.m except that the hidden units are linear rather than logistic; everything else is unchanged.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
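For reference, the lines that differ from rbm.m are essentially the following (the annotator's sketch based on the published deep-autoencoder code, where the top RBM's hidden units are linear with unit-variance Gaussian noise; that code also uses much smaller learning rates for this RBM). The surrounding loop structure is unchanged:

% Positive phase: the hidden "probabilities" are the linear activations themselves.
poshidprobs = (data*vishid) + repmat(hidbiases,numcases,1);
% Sampling adds unit-variance Gaussian noise instead of binary thresholding.
poshidstates = poshidprobs + randn(numcases,numhid);
% Negative phase: visible units remain logistic, hidden units remain linear.
negdata = 1./(1 + exp(-poshidstates*vishid' - repmat(visbiases,numcases,1)));
neghidprobs = (negdata*vishid) + repmat(hidbiases,numcases,1);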

