[torch]Getting the Output of a Layer

来源:互联网 发布:合肥网络招聘会 编辑:程序博客网 时间:2024/06/05 20:44

https://groups.google.com/forum/#!topic/torch7/R9DAdx95aqc

introduction

-- Demo: how to read the per-timestep output of a layer inside nn.Sequencer.
-- Builds a 6-step sequence of 5x10 batches; step 1 is all zeros except row 3,
-- so MaskZero(..., 1) zeroes outputs for the zero-padded rows.
require 'cutorch'
require 'cunn'
require 'rnn'
require 'os'

local tensor1 = torch.zeros(5, 10)
tensor1[3] = torch.rand(1, 10)
print(tensor1)

local input = {
  tensor1,
  torch.rand(5, 10),
  torch.rand(5, 10),
  torch.rand(5, 10),
  torch.rand(5, 10),
  torch.rand(5, 10),
}

local net = nn.Sequencer(
  nn.Sequential()
    :add(nn.MaskZero(nn.FastLSTM(10, 3), 1))
    :add(nn.MaskZero(nn.Linear(3, 4), 1))
    :add(nn.MaskZero(nn.LogSoftMax(), 1))
)

local output = net:forward(input)
local m = net.modules

-- The Sequencer keeps one shared clone of the inner module per timestep;
-- sharedClones[i].modules[1].output is the final output of the wrapped
-- Sequential at step i, so it should equal output[i].
for i = 1, #input do
  print(output[i])
  print(m[1].sharedClones[i].modules[1].output)
end

print("net")
print(net)
print("m")
print(m)

output

net nn.Sequencer @ nn.Recursor @ nn.Sequential {  [input -> (1) -> (2) -> (3) -> output]  (1): nn.MaskZero @ nn.FastLSTM(10 -> 3)  (2): nn.MaskZero @ nn.Linear(3 -> 4)  (3): nn.MaskZero @ nn.LogSoftMax}m   {  1 :     {      sharedClones :         {          1 :             {              gradInput : DoubleTensor - empty              modules :                 {                  1 : {...}                  2 : {...}                  3 : {...}                }              _type : "torch.DoubleTensor"              output : DoubleTensor - size: 5x4            }          2 :             {              gradInput : DoubleTensor - empty              modules :                 {                  1 : {...}                  2 : {...}                  3 : {...}                }              _type : "torch.DoubleTensor"              output : DoubleTensor - size: 5x4            }          3 :             {              gradInput : DoubleTensor - empty              modules :                 {                  1 : {...}                  2 : {...}                  3 : {...}                }              _type : "torch.DoubleTensor"              output : DoubleTensor - size: 5x4            }          4 :             {              gradInput : DoubleTensor - empty              modules :                 {                  1 : {...}                  2 : {...}                  3 : {...}                }              _type : "torch.DoubleTensor"              output : DoubleTensor - size: 5x4            }          5 :             {              gradInput : DoubleTensor - empty              modules :                 {                  1 : {...}                  2 : {...}                  3 : {...}                }              _type : "torch.DoubleTensor"              output : DoubleTensor - size: 5x4            }          6 :             {              gradInput : DoubleTensor - empty              modules :                 {                  1 : {...}                  2 : {...}              
    3 : {...}                }              _type : "torch.DoubleTensor"              output : DoubleTensor - size: 5x4            }        }      step : 7      outputs :         {          1 : DoubleTensor - size: 5x4          2 : DoubleTensor - size: 5x4          3 : DoubleTensor - size: 5x4          4 : DoubleTensor - size: 5x4          5 : DoubleTensor - size: 5x4          6 : DoubleTensor - size: 5x4        }      output : DoubleTensor - size: 5x4      gradInput : DoubleTensor - empty      modules :         {          1 :             {              gradInput : DoubleTensor - empty              modules :                 {                  1 : {...}                  2 : {...}                  3 : {...}                }              _type : "torch.DoubleTensor"              output : DoubleTensor - size: 5x4            }        }      _gradOutputs : {...}      rho : 6      recurrentModule :         {          gradInput : DoubleTensor - empty          modules :             {              1 :                 {                  output : DoubleTensor - size: 5x3                  gradInput : DoubleTensor - empty                  nInputDim : 1                  batchmode : true                  zeroMask : ByteTensor - size: 5x1                  _type : "torch.DoubleTensor"                  _zeroMask : DoubleTensor - size: 5x1                  module : {...}                  modules : {...}                }              2 :                 {                  output : DoubleTensor - size: 5x4                  gradInput : DoubleTensor - empty                  nInputDim : 1                  batchmode : true                  zeroMask : ByteTensor - size: 5x1                  _type : "torch.DoubleTensor"                  _zeroMask : DoubleTensor - size: 5x1                  module : {...}                  modules : {...}                }              3 :                 {                  output : DoubleTensor - size: 5x4                  gradInput : DoubleTensor - empty       
           nInputDim : 1                  batchmode : true                  zeroMask : ByteTensor - size: 5x1                  _type : "torch.DoubleTensor"                  _zeroMask : DoubleTensor - size: 5x1                  module : {...}                  modules : {...}                }            }          _type : "torch.DoubleTensor"          output : DoubleTensor - size: 5x4        }      nSharedClone : 6      _type : "torch.DoubleTensor"      gradInputs : {...}      module :         {          gradInput : DoubleTensor - empty          modules :             {              1 :                 {                  output : DoubleTensor - size: 5x3                  gradInput : DoubleTensor - empty                  nInputDim : 1                  batchmode : true                  zeroMask : ByteTensor - size: 5x1                  _type : "torch.DoubleTensor"                  _zeroMask : DoubleTensor - size: 5x1                  module : {...}                  modules : {...}                }              2 :                 {                  output : DoubleTensor - size: 5x4                  gradInput : DoubleTensor - empty                  nInputDim : 1                  batchmode : true                  zeroMask : ByteTensor - size: 5x1                  _type : "torch.DoubleTensor"                  _zeroMask : DoubleTensor - size: 5x1                  module : {...}                  modules : {...}                }              3 :                 {                  output : DoubleTensor - size: 5x4                  gradInput : DoubleTensor - empty                  nInputDim : 1                  batchmode : true                  zeroMask : ByteTensor - size: 5x1                  _type : "torch.DoubleTensor"                  _zeroMask : DoubleTensor - size: 5x1                  module : {...}                  modules : {...}                }            }          _type : "torch.DoubleTensor"          output : DoubleTensor - size: 5x4        }      
rmInSharedClones : true    }}

可以看出m是一个table类型的变量. 所以看看想要它输出什么就能输出什么.
例如:

-- Same experiment with the Linear and LogSoftMax layers commented out:
-- now sharedClones[i].modules[1].output is the FastLSTM output itself.
-- NOTE(review): the original snippet ended with a stray ')' after the final
-- 'end', which is a Lua syntax error; it has been removed here.
local tensor1 = torch.zeros(5, 10)
tensor1[3] = torch.rand(1, 10)
print(tensor1)

local input = {
  tensor1,
  torch.rand(5, 10),
  torch.rand(5, 10),
  torch.rand(5, 10),
  torch.rand(5, 10),
  torch.rand(5, 10),
}

local net = nn.Sequencer(
  nn.Sequential()
    :add(nn.MaskZero(nn.FastLSTM(10, 3), 1))
--    :add(nn.MaskZero(nn.Linear(3, 4), 1))
--    :add(nn.MaskZero(nn.LogSoftMax(), 1))
)

local output = net:forward(input)
local m = net.modules

for i = 1, #input do
  print(output[i])
  print(m[1].sharedClones[i].modules[1].output)
end

test

-- Verification: sharedClones[seqj].modules[2].output is really the output of
-- layer 2 (the Linear). We push it through a fresh MaskZero(LogSoftMax) and
-- compare against the network's final output; the difference should be all
-- zeros. (Large blocks of commented-out scratch code from the original have
-- been removed for readability.)
require 'cutorch'
require 'cunn'
require 'rnn'
require 'os'

local net = torch.load('model.t7')
net:cuda()

-- Load the previously serialized input sequence (a table of CudaTensors).
local file = torch.DiskFile('input.asc', 'r')
local input = file:readObject()
file:close()  -- the original script never closed this handle (resource leak)
print(input)

local m = net.modules
local output = net:forward(input)

local model = nn.MaskZero(nn.LogSoftMax(), 1):cuda()
for seqj = 1, #input do
  print(seqj)
  local res = m[1].sharedClones[seqj].modules[2].output
  local out1 = output[seqj]
  local out2 = model:forward(res)
  print(out1 - out2)  -- expect a tensor of zeros
end

test.lua中得到的 res = m[1].sharedClones[seqj].modules[2].output 是取了net的第二层(Linear层)的输出.
out1与out2值相等, 说明 res 确实是第二层的输出.

0 0
原创粉丝点击
热门问题 老师的惩罚 人脸识别 我在镇武司摸鱼那些年 重生之率土为王 我在大康的咸鱼生活 盘龙之生命进化 天生仙种 凡人之先天五行 春回大明朝 姑娘不必设防,我是瞎子 晚上睡觉时间短怎么办 小孩说爸爸死了怎么办 扫地机器人坏了怎么办 孩子思维逻辑差怎么办 喜欢动手打别人怎么办 小孩写作文困难怎么办 2岁儿子打人怎么办 三岁宝宝爱咬人怎么办 幼儿园小孩咬人怎么办 孩子不会动手打人怎么办 小孩子隔奶奶涨怎么办 小孩子戒奶奶涨怎么办 孩子总打别人怎么办 儿童在学校打人怎么办 两岁儿童打人怎么办 孩子哭闹要东西怎么办 高中孩子爱打架怎么办 宝宝性格太弱怎么办 宝宝太老实了怎么办 一岁多宝宝爱打人怎么办 一岁半宝宝爱打人怎么办 两岁半宝宝喜欢打人怎么办 孩子总是挨欺负怎么办 一年级孩子爱打架怎么办 孩子没规矩家长怎么办 小朋友在幼儿园打人怎么办 幼儿园小朋友喜欢打人怎么办 三周岁宝宝爱哭怎么办 2周岁宝宝爱哭怎么办 一个月宝宝爱哭怎么办 2岁宝宝爱哭怎么办 三岁小朋友打人怎么办 一岁半小朋友喜欢打人怎么办 爷爷偏心我该怎么办 冲动型学生老师怎么办 两个宝宝争东西怎么办 2周岁宝宝打人怎么办 小孩幼儿园被打怎么办 小孩喝了润滑油怎么办 孩子被打了怎么办 宝宝吃了指甲油怎么办