[torch]create a new criterion(cross entropy)

来源:互联网 发布:网络主播怎么吸引粉丝 编辑:程序博客网 时间:2024/06/16 19:51

http://blog.csdn.net/u010167269/article/details/51966427

说明

torch自带的crossentropycriterion

torch本来的CrossEntropyCriterion和ClassNLLCriterion只支持hard targets.

以多分类问题为例. 假设有4类:dog, cat, carrot, book.
在训练cat这一类时, 给的标签是P=(0,1,0,0),模型的输出为Q=(0.1,0.7,0.01,0.19)
CrossEntropyCriterion(torch)
$loss = -\log(q_2)$
即使标签是soft targetP=(0.3,0.5,0.2,0.1), torch的CrossEntropyCriterion在计算的时候也不会考虑其他的类.
MyCrossEntropyCriterion
实际上 cross entropy 应该是 $loss = -\sum_i p_i \log(q_i) = -p_1\log(q_1) - p_2\log(q_2) - \dots$

另外再说一点:
“DistKLDivCriterion” is the KL divergence(relative entropy),
$\sum_i p_i \log(p_i/q_i) = \sum_i \left[ p_i \log(p_i) - p_i \log(q_i) \right]$,
when $p_i$ (the targets) is fixed, minimizing the relative entropy is equivalent to minimizing the cross entropy (they differ only by the constant $\sum_i p_i \log p_i$).

支持的选项: weights, sizeAverage, maskZero

backward的计算

https://ckmarkoh.github.io/blog/2017/01/01/torch-nn-tutorial-5-backward-propagation/

loss=criterion:forward(input,target)gradout = criterion:backward(input,target)

input: logsoftmax (可以在模型的最后一层加入nn.LogSoftmax())
target: soft targets or hard targets (the dimension should be same as input)

backward:
$gradout_j = \frac{\partial\, loss}{\partial\, input_j} = \frac{\partial\left(-\sum_i p_i \log(q_i)\right)}{\partial \log(q_j)} = -p_j$, 即 $gradout = (-p_1, -p_2, \dots) = -target$

CPU:

MyCrossEntropyCriterion.lua

require 'nn'
require 'os'

-- Cross-entropy criterion supporting soft targets:
--   loss = -sum_i p_i * log(q_i)
-- `input` must hold log-probabilities (e.g. the output of nn.LogSoftMax());
-- `target` is a distribution with the same shape as `input` (NOT class
-- indices, unlike nn.ClassNLLCriterion).
-- Options:
--   weights     : optional 1-D per-class weight tensor
--   sizeAverage : normalize the loss by the total (weighted, masked)
--                 target mass (default true)
--   maskZero    : treat rows of `input` that sum to zero as padding and
--                 exclude them from loss and gradient (default false)
local MyCrossEntropyCriterion, parent = torch.class('nn.MyCrossEntropyCriterion', 'nn.Criterion')

function MyCrossEntropyCriterion:__init(weights, sizeAverage, maskZero)
    parent.__init(self)
    self.sizeAverage = true
    if sizeAverage ~= nil then
        self.sizeAverage = sizeAverage
    end
    self.maskZero = false
    if maskZero ~= nil then
        self.maskZero = maskZero
    end
    if weights then
        assert(weights:dim() == 1, "weights input should be 1-D Tensor")
        self.weights = weights
    end
end

-- Forward pass. Stores self.mask and self.divnum for reuse in the
-- backward pass; returns a scalar loss.
function MyCrossEntropyCriterion:updateOutput(input, target)
    self.mask = torch.ones(input:size())
    if self.maskZero then
        -- A row whose entries sum to zero is treated as padding.
        -- NOTE(review): torch.sum(input, 2) assumes a 2-D input here;
        -- maskZero with a 1-D input is not supported — confirm callers.
        local rows = torch.sum(input, 2)
        local rowid = rows:ne(0) --:nonzero()[{ {},{1} }]
        rowid = torch.repeatTensor(rowid, 1, input:size()[2])
        self.mask = torch.DoubleTensor()
        self.mask:resize(rowid:size()):copy(rowid)
        -- Work on a masked copy; the original code mutated the caller's
        -- target tensor in place, which is a surprising side effect.
        target = target:clone():cmul(self.mask)
        if torch.sum(self.mask) == 0 then
            -- Raise a catchable error instead of killing the process.
            error("torch.sum(self.mask) == 0 !")
        end
    end
    local temp
    if input:dim() == 2 then -- batchsize > 1
        self.out = torch.zeros(1, input:size()[2])
        self.batchsize = input:size()[1]
        for i = 1, self.batchsize do
            -- accumulate p_i * log(q_i) (per class, summed over batch)
            temp = target[i]:clone()
            temp:cmul(input[i])
            if self.weights then
                temp:cmul(self.weights)
            end
            self.out:add(temp)
        end
    elseif input:dim() == 1 then -- batchsize = 1
        self.batchsize = 1
        temp = target:clone()
        temp:cmul(input)
        self.out = temp
    end
    if self.sizeAverage then
        -- Normalize by the total weighted, masked target mass.
        local divnum = target:clone()
        if self.weights then
            local weight_rep = torch.repeatTensor(self.weights, self.batchsize, 1)
            divnum:cmul(weight_rep)
        end
        divnum:cmul(self.mask)
        self.divnum = divnum:sum()
        if self.divnum == 0 then
            error("self.divnum==0!")
        else
            self.out:div(self.divnum)
        end
    end
    self.output = -torch.sum(self.out)
    return self.output
end

-- Backward pass: d loss / d input_j = -p_j, with the same weighting,
-- masking and normalization applied in the forward pass.
function MyCrossEntropyCriterion:updateGradInput(input, target)
    --self.gradInput = -torch.cdiv(target, input)
    self.gradInput = -target
    if self.maskZero then
        self.gradInput:cmul(self.mask)
    end
    if self.weights then
        local weight_rep = torch.repeatTensor(self.weights, self.batchsize, 1)
        self.gradInput:cmul(weight_rep)
    end
    if self.sizeAverage then
        self.gradInput:div(self.divnum)
    end
    --print("MyCriterion_backward memory: " .. collectgarbage("count") .. "KB")
    return self.gradInput
end

return MyCrossEntropyCriterion

test1.lua

多分类问题
刚才写的MyCrossEntropyCriterion.lua和这个文件放在同一个目录下.

require 'nn'require 'os'require 'rnn'require 'MyCrossEntropyCriterion'--for maxzero = 0,1 do--for data = 0,1 domaxzero = 1data =  0sizeAverage = true--weight=torch.ones(3)weight=torch.Tensor{0.1,0.2,0.4}if data == 0 then        y_ = torch.Tensor{{-1.20397280433,-2.30258509299,-0.51082562376},{-2.30258509299,-0.22314355131,-2.30258509299},{0,0,0}} --{log0.3,log0.1,log0.6},{log0.1,log0.8,log0.1}        y_ = torch.Tensor{{1,2,3},{3,5,1},{0,0,0}}        y_d = torch.Tensor{3,2,1}  --y_d[3] can be any number(1<=y_d[3]<=3)        y_s = torch.Tensor{{0,0,1},{0,1,0},{1,0,0}} --y_s[3] can be any tensor        y_s = torch.Tensor{{0.1,0.2,0.7},{0.09,0.8,0.11},{0.5,0.2,0.3}}else        y_ = torch.Tensor{{-1.20397280433,-2.30258509299,-0.51082562376},{-2.30258509299,-0.22314355131,-2.30258509299}} --{log0.3,log0.1,log0.6},{log0.1,log0.8,log0.1}        y_ = torch.Tensor{{1,2,3},{3,5,1}}        y_d = torch.Tensor{3,2}        y_s = torch.Tensor{{0,0,1},{0,1,0}}endprint("data")print(y_)print("hard_targ, soft_targ")print(y_d,y_s)if maxzero == 1 then        c1 = nn.MyCrossEntropyCriterion(weight)        c1.maskZero = true        c1.sizeAverage = sizeAverage        c3_t = nn.ClassNLLCriterion(weight)        c3_t.sizeAverage = sizeAverage        c3 = nn.MaskZeroCriterion(c3_t,1)else        c1 = nn.MyCrossEntropyCriterion(weight)        c1.sizeAverage = sizeAverage        c3 = nn.ClassNLLCriterion(weight)        c3.sizeAverage = sizeAverageendo1 = c1:forward(y_,y_s)o3 = c3:forward(y_,y_d)print(o1-o3)o1b = c1:backward(y_,y_s)o3b = c3:backward(y_,y_d)print(o1b-o3b)--end--end

test2.lua

-- test2.lua: sequence-level test using rnn's SequencerCriterion.
-- Sweeps maxzero (mask padding or not) x data (with/without all-zero
-- rows) and compares MyCrossEntropyCriterion (soft targets) against
-- ClassNLLCriterion (hard targets) over a table of 3 time steps.
require 'nn'
require 'os'
require 'rnn'
require 'MyCrossEntropyCriterion'
for maxzero = 0, 1 do
for data = 0, 1 do
--maxzero = 1
--data = 0
print(maxzero, data)
sizeAverage = true
sizeAverage = false
--weight=torch.ones(3)
weight = torch.Tensor{0.1,0.2,0.3}
weight = torch.Tensor{1,0.2,1}
y_ = {}
y_d = {}
y_s = {}
model = nn.MaskZero(nn.LogSoftMax(), 1)
if data == 0 then
    -- data == 0: sequences contain all-zero padding rows
    y_1 = torch.Tensor{{1,2,3},{0,0,0},{0,0,0}}
    y_d1 = torch.Tensor{3,0,0}
    y_s1 = torch.Tensor{{0,0,1},{0,0,0},{0,0,0}}
    y_d1 = torch.Tensor{3,1,1}
    y_s1 = torch.Tensor{{0,0,1},{1,0,0},{1,0,0}}
    y_2 = torch.Tensor{{23,17,3},{2,41,7},{0,0,0}}
    y_d2 = torch.Tensor{1,2,0}
    y_s2 = torch.Tensor{{1,0,0},{0,1,0},{0,0,0}}
    y_d2 = torch.Tensor{1,2,3}
    y_s2 = torch.Tensor{{1,0,0},{0,1,0},{0,0,1}}
    y_3 = torch.Tensor{{11,32,1},{1,45,34},{12,2,6}}
    y_d3 = torch.Tensor{2,2,1}
    y_s3 = torch.Tensor{{0,1,0},{0,1,0},{1,0,0}}
else
    y_1 = torch.Tensor{{1,2,3},{14,12,3},{33,2,7}}
    y_d1 = torch.Tensor{3,1,1}
    y_s1 = torch.Tensor{{0,0,1},{1,0,0},{1,0,0}}
    y_2 = torch.Tensor{{23,17,3},{2,41,7},{8,23,51}}
    y_d2 = torch.Tensor{1,2,3}
    y_s2 = torch.Tensor{{1,0,0},{0,1,0},{0,0,1}}
    y_3 = torch.Tensor{{11,32,1},{1,45,34},{12,2,6}}
    y_d3 = torch.Tensor{2,2,1}
    y_s3 = torch.Tensor{{0,1,0},{0,1,0},{1,0,0}}
end
-- clone() each forward result: nn modules reuse their output buffer, so
-- without the clone all three table entries would alias the same tensor
-- (the last forward's result).
table.insert(y_, model:forward(y_1):clone())
table.insert(y_d, y_d1)
table.insert(y_s, y_s1)
table.insert(y_, model:forward(y_2):clone())
table.insert(y_d, y_d2)
table.insert(y_s, y_s2)
table.insert(y_, model:forward(y_3):clone())
table.insert(y_d, y_d3)
table.insert(y_s, y_s3)
if maxzero == 1 then
    c1 = nn.MyCrossEntropyCriterion(weight)
    c1.maskZero = true
    c1.sizeAverage = sizeAverage
    c3_t = nn.ClassNLLCriterion(weight)
    c3_t.sizeAverage = sizeAverage
    c3 = nn.MaskZeroCriterion(c3_t, 1)
else
    c1 = nn.MyCrossEntropyCriterion(weight)
    c1.sizeAverage = sizeAverage
    c3 = nn.ClassNLLCriterion(weight)
    c3.sizeAverage = sizeAverage
end
c1_seq = nn.SequencerCriterion(c1)
c3_seq = nn.SequencerCriterion(c3)
o1 = c1_seq:forward(y_, y_s)
o3 = c3_seq:forward(y_, y_d)
print(o1 - o3)
o1b = c1_seq:backward(y_, y_s)
o3b = c3_seq:backward(y_, y_d)
for i = 1, 3 do
    print(string.format("seq:%d", i))
    print("mine")
    print(o1b[i])
    print("ori")
    print(o3b[i])
    print(string.format("%f", o1b[i][1][1]))
    -- (removed a leftover debug os.exit() here: it made the difference
    -- print below unreachable and aborted the sweep after one sequence)
    print(o1b[i] - o3b[i])
end
end
end

GPU(C/CUDA)

http://torch.ch/docs/developer-docs.html
https://zhuanlan.zhihu.com/p/21550685
examples:
https://github.com/torch/nn/blob/master/ClassNLLCriterion.lua
https://github.com/torch/nn/blob/master/lib/THNN/generic/ClassNLLCriterion.c
https://github.com/torch/cunn/blob/master/lib/THCUNN/ClassNLLCriterion.cu

details

1.MyCrossEntropyCriterionGPU.lua

新建torch/distro/extra/nn/MyCrossEntropyCriterionGPU.lua

local THNN = require 'nn.THNN'

-- C/CUDA-backed cross-entropy criterion with soft targets:
--   loss = -sum_i p_i * log(q_i)
-- `input` holds log-probabilities; `target` is a distribution with the
-- same shape as `input` (not class indices). All computation is
-- delegated to THNN/THCUNN (MyCrossEntropyCriterionGPU.c / .cu).
local MyCrossEntropyCriterionGPU, parent = torch.class('nn.MyCrossEntropyCriterionGPU', 'nn.Criterion')

function MyCrossEntropyCriterionGPU:__init(weights, sizeAverage)
    parent.__init(self)
    if sizeAverage ~= nil then
        self.sizeAverage = sizeAverage
    else
        self.sizeAverage = true
    end
    if weights then
        assert(weights:dim() == 1, "weights input should be 1-D Tensor")
        self.weights = weights
    end
    -- one-element buffers that the C/CUDA kernels write into
    self.output_tensor = torch.zeros(1)
    self.total_weight_tensor = torch.ones(1)
    self.target = torch.zeros(1):long()
end

-- Length is the number of classes covered by `weights`, or 0 when no
-- weights were given.
function MyCrossEntropyCriterionGPU:__len()
    if (self.weights) then
        return #self.weights
    else
        return 0
    end
end

-- Forward: hands input/target straight to the backend kernel.
-- The commented-out block below is the hard-target index conversion
-- inherited from ClassNLLCriterion; it is unused because targets here
-- are real-valued distributions, not indices.
function MyCrossEntropyCriterionGPU:updateOutput(input, target)
    --[[if type(target) == 'number' then        if torch.typename(input):find('torch%.Cuda.*Tensor') then            self.target = torch.CudaLongTensor and self.target:cudaLong() or self.target:cuda()        else            self.target = self.target:long()        end        self.target:resize(1)        self.target[1] = target    elseif torch.typename(input):find('torch%.Cuda.*Tensor') then        self.target = torch.CudaLongTensor and target:cudaLong() or target    else        self.target = target:long()    end--]]
    self.target = target
    input.THNN.MyCrossEntropyCriterionGPU_updateOutput(
        input:cdata(),
        self.target:cdata(),
        self.output_tensor:cdata(),
        self.sizeAverage,
        THNN.optionalTensor(self.weights),
        self.total_weight_tensor:cdata()
    )
    self.output = self.output_tensor[1]
    -- also returns the accumulated target weight computed by the kernel
    return self.output, self.total_weight_tensor[1]
end

-- Backward: gradInput is resized/zeroed here, then filled by the kernel
-- (gradInput_j = -weight_j * target_j, normalized when sizeAverage).
function MyCrossEntropyCriterionGPU:updateGradInput(input, target)
    --[[if type(target) == 'number' then        if torch.typename(input):find('torch%.Cuda.*Tensor') then            self.target = torch.CudaLongTensor and self.target:cudaLong() or self.target:cuda()        else            self.target = self.target:long()        end        self.target:resize(1)        self.target[1] = target    elseif torch.typename(input):find('torch%.Cuda.*Tensor') then        self.target = torch.CudaLongTensor and target:cudaLong() or target    else        self.target = target:long()    end--]]
    self.target = target
    self.gradInput:resizeAs(input):zero()
    input.THNN.MyCrossEntropyCriterionGPU_updateGradInput(
        input:cdata(),
        self.target:cdata(),
        self.gradInput:cdata(),
        self.sizeAverage,
        THNN.optionalTensor(self.weights),
        self.total_weight_tensor:cdata()
    )
    return self.gradInput
end

2.MyCrossEntropyCriterionGPU.c

新建torch/distro/extra/nn/lib/THNN/generic/MyCrossEntropyCriterionGPU.c

#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/MyCrossEntropyCriterionGPU.c"
#else

/* CPU reference implementation of the soft-target cross-entropy:
 *   loss = -sum_i w_i * p_i * log(q_i)
 * input  : 1-D (n_classes) or 2-D (batch x n_classes) log-probabilities
 * target : distribution with the same shape as input
 * output / total_weight : one-element buffers; total_weight accumulates
 *   sum_i w_i * p_i and is reused as the sizeAverage normalizer. */
void THNN_(MyCrossEntropyCriterionGPU_updateOutput)(
    THNNState *state,
    THTensor *input,
    THTensor *target,
    THTensor *output,
    bool sizeAverage,
    THTensor *weights,
    THTensor *total_weight)
{
    THNN_CHECK_DIM_SIZE(output, 1, 0, 1);
    THNN_CHECK_DIM_SIZE(total_weight, 1, 0, 1);
    int n_dims = THTensor_(nDimension)(input);
    /* number of classes = size of the last dimension */
    int n_classes = THTensor_(size)(input, n_dims - 1);
    if (THTensor_(nDimension)(input) > 2) {
        THError("input tensor should be 1D or 2D");
    }
    if (weights && THTensor_(nElement)(weights) != n_classes) {
        THDescBuff s1 = THTensor_(sizeDesc)(weights);
        THError("weight tensor should be defined either for all %d classes or no classes"
        " but got weight tensor of shape: %s", n_classes, s1.str);
    }
    /* work on contiguous copies; freed before returning */
    input = THTensor_(newContiguous)(input);
    target = THTensor_(newContiguous)(target);
    weights = weights ? THTensor_(newContiguous)(weights) : NULL;
    real *input_data = THTensor_(data)(input);
    real *target_data = THTensor_(data)(target);
    real *weights_data = weights ? THTensor_(data)(weights) : NULL;
    real *output_data = THTensor_(data)(output);
    real *total_weight_data = THTensor_(data)(total_weight);
    output_data[0] = total_weight_data[0] = 0.0;
    if (THTensor_(nDimension)(input) == 1) {
        int t;
        for (t=0; t < n_classes; t++) {
            real cur_weight = weights ? weights_data[t] : 1.0f;
            total_weight_data[0] += cur_weight * target_data[t];
            output_data[0] -= input_data[t] * cur_weight * target_data[t];
        }
    } else if (THTensor_(nDimension)(input) == 2) {
        int batch_size = THTensor_(size)(input, 0);
        THAssert(THTensor_(size)(target, 0) == batch_size);
        int i,t;
        for (i = 0; i < batch_size; i++) {
            for (t=0; t < n_classes; t++) {
                real cur_weight = weights ? weights_data[t] : 1.0f;
                total_weight_data[0] += cur_weight * target_data[i * n_classes + t];
                output_data[0] -= input_data[i * n_classes + t] * target_data[i * n_classes + t] * cur_weight;
            }
        }
    }
    /* normalize only when the accumulated weight is non-zero */
    if (sizeAverage && total_weight_data[0]) {
        output_data[0] /= total_weight_data[0];
    }
    if (weights) {
        THTensor_(free)(weights);
    }
    THTensor_(free)(input);
    THTensor_(free)(target);
}

/* Gradient of the soft-target cross-entropy w.r.t. the log-probability
 * input: gradInput_j = -w_j * p_j (divided by total_weight when
 * sizeAverage). Reads total_weight as computed by updateOutput; if it is
 * not positive the gradInput (pre-zeroed by the Lua caller) is left
 * untouched. input data itself is never read, only its sizes. */
void THNN_(MyCrossEntropyCriterionGPU_updateGradInput)(
    THNNState *state,
    THTensor *input,
    THTensor *target,
    THTensor *gradInput,
    bool sizeAverage,
    THTensor *weights,
    THTensor *total_weight)
{
    int n_dims = THTensor_(nDimension)(input);
    int n_classes = THTensor_(size)(input, n_dims - 1);
    if (!THTensor_(isContiguous)(gradInput)) {
        THError("gradInput must be contiguous");
    }
    real *total_weight_data = THTensor_(data)(total_weight);
    if (!(*total_weight_data > 0)) {
        return;
    }
    if (THTensor_(nDimension)(target) != THTensor_(nDimension)(input)) {
        THError("the dim of target and input should be same.");
    }
    if (THTensor_(nDimension)(input) > 2) {
        THError("input tensor should be 1D or 2D");
    }
    target = THTensor_(newContiguous)(target);
    weights = weights ? THTensor_(newContiguous)(weights) : NULL;
    real *target_data = THTensor_(data)(target);
    real *weights_data = weights ? THTensor_(data)(weights) : NULL;
    real *gradInput_data = THTensor_(data)(gradInput);
    if (THTensor_(nDimension)(input) == 1) {
        int t;
        for (t=0; t < n_classes; t++) {
            real cur_weight = weights ? weights_data[t] : 1.0f;
            gradInput_data[t] = -cur_weight * target_data[t];
            if (sizeAverage && *total_weight_data) {
                gradInput_data[t] /= *total_weight_data;
            }
        }
    } else if (THTensor_(nDimension)(input) == 2) {
        int batch_size = THTensor_(size)(input, 0);
        THAssert(THTensor_(size)(target, 0) == batch_size);
        int i,t;
        for (i = 0; i < batch_size; i++){
            for (t=0; t < n_classes; t++) {
                real cur_weight = weights ? weights_data[t] : 1.0f;
                gradInput_data[i * n_classes + t] = -cur_weight * target_data[i * n_classes + t];
                if (sizeAverage && *total_weight_data) {
                    gradInput_data[i * n_classes + t] /= *total_weight_data;
                }
            }
        }
    }
    THTensor_(free)(target);
    if (weights) {
        THTensor_(free)(weights);
    }
}
#endif

3.MyCrossEntropyCriterionGPU.cu

1.torch/distro/extra/cunn/lib/THCUNN/MyCrossEntropyCriterionGPU.cu

#include "THCUNN.h"
#include "common.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include <stdio.h>
#include <assert.h>

static const int NTHREADS = 32;

/* Single-sample (1-D input) forward kernel, launched <<<1, 1>>>:
 * accumulates loss = -sum_t w_t * p_t * log(q_t) and
 * total_weight = sum_t w_t * p_t in Acctype, then optionally divides
 * by total_weight (sizeAverage). */
template <typename Dtype, typename Acctype>
__global__ void cunn_MyCrossEntropyCriterionGPU_updateOutput_kernel1(Dtype *output,
                                Dtype *total_weight,
                                Dtype *input,
                                Dtype *target,
                                Dtype *weights,
                                int size_average,
                                int n_classes) {
    assert(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0);
    int t;
    *output = *total_weight = ScalarConvert<int, Dtype>::to(0);
    Acctype outputAcc = 0;
    Acctype total_weightAcc = 0;
    for (t = 0; t < n_classes; t += 1) {
        Dtype cur_weight = weights ? weights[t] : ScalarConvert<int, Dtype>::to(1);
        outputAcc -= input[t] * cur_weight * target[t];
        total_weightAcc += cur_weight * target[t];
    }
    *total_weight = ScalarConvert<Acctype, Dtype>::to(total_weightAcc);
    *output = ScalarConvert<Acctype, Dtype>::to(outputAcc);
    if (size_average && *total_weight > 0) {
        *output /= *total_weight;
    }
}

/* Batched (2-D input) forward kernel, launched <<<1, NTHREADS>>>:
 * each thread accumulates partial sums over rows i = threadIdx.x,
 * threadIdx.x + NTHREADS, ... into shared memory; after the barrier,
 * thread 0 reduces the NTHREADS partials and writes the result. */
template <typename Dtype, typename Acctype>
__global__ void cunn_MyCrossEntropyCriterionGPU_updateOutput_kernel(Dtype *output,
                                Dtype *total_weight,
                                Dtype *input,
                                Dtype *target,
                                Dtype *weights,
                                int size_average,
                                int nframe,
                                int ndim,
                                int n_classes) {
    __shared__ Acctype shInputs[NTHREADS], acc_weight[NTHREADS];
    int i, t;
    Dtype cur_weight;
    shInputs[threadIdx.x] = ScalarConvert<int, Acctype>::to(0);
    acc_weight[threadIdx.x] = ScalarConvert<int, Acctype>::to(0);
    for (i = threadIdx.x; i < nframe; i += NTHREADS) {
        for (t = 0; t < n_classes; t += 1) {
            cur_weight = weights ? weights[t] : ScalarConvert<int, Dtype>::to(1);
            shInputs[threadIdx.x] -= input[i * ndim + t] * cur_weight * target[i * ndim + t];
            acc_weight[threadIdx.x] += cur_weight * target[i * ndim + t];
        }
    }
    __syncthreads();
    if (threadIdx.x == 0) {
        /* NOTE(review): initialized to 1 here vs 0 in kernel1; it is a
         * dead store (both values are overwritten below) but looks like
         * a typo — confirm against kernel1. */
        *output = *total_weight = ScalarConvert<int, Dtype>::to(1);
        Acctype outputAcc = 0;
        Acctype total_weightAcc = 0;
        for (i = 0; i < NTHREADS; ++i){
            outputAcc += shInputs[i];
            total_weightAcc += acc_weight[i];
        }
        *total_weight = ScalarConvert<Acctype, Dtype>::to(total_weightAcc);
        *output = ScalarConvert<Acctype, Dtype>::to(outputAcc);
        if (size_average && *total_weight > 0) {
            *output = ScalarConvert<Acctype, Dtype>::to(outputAcc / total_weightAcc);
        }
    }
}

/* Single-sample backward kernel, launched <<<1, 1>>>:
 * gradInput_t = -w_t * p_t (times 1/total_weight when sizeAverage).
 * Leaves gradInput untouched (pre-zeroed by the host) when
 * total_weight <= 0. */
template <typename Dtype>
__global__ void cunn_MyCrossEntropyCriterionGPU_updateGradInput_kernel1(
    Dtype* gradInput,
    Dtype* weights,
    Dtype* target,
    Dtype* total_weight,
    int size_average,
    int n_classes)
{
    if (*total_weight <= 0) {
        return;
    }
    Dtype norm = size_average ? (ScalarConvert<int, Dtype>::to(1) / *total_weight) : ScalarConvert<int, Dtype>::to(1);
    int t;
    for (t = 0; t < n_classes; t += 1) {
        gradInput[t] = -(weights ? weights[t] : ScalarConvert<int, Dtype>::to(1)) * target[t] * norm;
    }
}

/* Batched backward kernel, launched <<<1, NTHREADS>>>: rows are
 * distributed across threads with stride NTHREADS (matching the launch
 * configuration in the host wrapper). */
template <typename Dtype>
__global__ void cunn_MyCrossEntropyCriterionGPU_updateGradInput_kernel(
    Dtype *gradInput,
    Dtype *target,
    Dtype *weights,
    Dtype *total_weight,
    int size_average,
    int nframe,
    int ndim,
    int n_classes)
{
    if (*total_weight <= 0) {
        return;
    }
    int i, t;
    Dtype norm = size_average ? (ScalarConvert<int, Dtype>::to(1) / *total_weight) : ScalarConvert<int, Dtype>::to(1);
    for (i = threadIdx.x; i < nframe; i += NTHREADS) {
        for (t = 0; t < n_classes; t += 1) {
            gradInput[i * ndim + t] = -(weights ? weights[t] : ScalarConvert<int, Dtype>::to(1)) * target[i * ndim + t] * norm;
        }
    }
}

#include "generic/MyCrossEntropyCriterionGPU.cu"
#include "THCGenerateFloatTypes.h"

2.torch/distro/extra/cunn/lib/THCUNN/generic/MyCrossEntropyCriterionGPU.cu

#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/MyCrossEntropyCriterionGPU.cu"
#else

/* Host wrapper for the forward pass: validates shapes/devices, makes
 * contiguous copies, and dispatches the 1-D kernel (<<<1,1>>>) or the
 * batched kernel (<<<1, NTHREADS>>>) on the current stream. */
void THNN_(MyCrossEntropyCriterionGPU_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCTensor *target,
           THCTensor *output,
           bool sizeAverage,
           THCTensor *weights,
           THCTensor *total_weight) {
  THCUNN_check_dim_size(state, output, 1, 0, 1);
  THCUNN_check_dim_size(state, total_weight, 1, 0, 1);
  int n_dims = THCTensor_(nDimension)(state, input);
  int n_classes = THCTensor_(size)(state, input, n_dims - 1);
  if (weights) {
    THCUNN_assertSameGPU(
      state, 5, input, target, weights, output, total_weight
    );
  } else {
    THCUNN_assertSameGPU(
      state, 4, input, target, output, total_weight
    );
  }
  THArgCheck(n_dims <= 2 && n_dims > 0, 2, "vector or matrix expected");
  long batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0);
  /* batch-size check disabled: target is a distribution tensor here,
     not a CudaLongTensor of indices as in ClassNLLCriterion */
  /*long num_targets = THCudaLongTensor_size(state, target, 0);
  THArgCheck(batch_size == num_targets,
      2, "mismatch between the batch size of input (%ld) and that of target (%ld)",
      batch_size, num_targets);*/
  if (weights && THCTensor_(nElement)(state, weights) != n_classes) {
    THCDescBuff s1 = THCTensor_(sizeDesc)(state, weights);
    THError("weight tensor should be defined either for all %d classes or no classes"
            " but got weight tensor of shape: %s", n_classes, s1.str);
  }
  input = THCTensor_(newContiguous)(state, input);
  weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
  target = THCTensor_(newContiguous)(state, target);
  real *input_data = THCTensor_(data)(state, input);
  real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
  real *target_data = THCTensor_(data)(state, target);
  real *output_data = THCTensor_(data)(state, output);
  real *total_weight_data = THCTensor_(data)(state, total_weight);
  if (THCTensor_(nDimension)(state, input) == 1) {
    cunn_MyCrossEntropyCriterionGPU_updateOutput_kernel1<real, accreal>
      <<<1, 1, 0, THCState_getCurrentStream(state)>>>(
        output_data,
        total_weight_data,
        input_data,
        target_data,
        weights_data,
        sizeAverage,
        n_classes
    );
  } else if (THCTensor_(nDimension)(state, input) == 2) {
    cunn_MyCrossEntropyCriterionGPU_updateOutput_kernel<real, accreal>
      <<<1, NTHREADS, 0, THCState_getCurrentStream(state)>>>(
        output_data,
        total_weight_data,
        input_data,
        target_data,
        weights_data,
        sizeAverage,
        THCTensor_(size)(state, input, 0),
        THCTensor_(size)(state, input, 1),
        n_classes
    );
  }
  THCudaCheck(cudaGetLastError());
  if (weights) {
    THCTensor_(free)(state, weights);
  }
  THCTensor_(free)(state, target);
  THCTensor_(free)(state, input);
}

/* Host wrapper for the backward pass: mirrors updateOutput's checks and
 * dispatch; gradInput must already be contiguous (it is resized/zeroed
 * by the Lua caller before this is invoked). */
void THNN_(MyCrossEntropyCriterionGPU_updateGradInput)(
           THCState *state,
           THCTensor *input,
           THCTensor *target,
           THCTensor *gradInput,
           bool sizeAverage,
           THCTensor *weights,
           THCTensor *total_weight) {
  int n_dims = THCTensor_(nDimension)(state, input);
  int n_classes = THCTensor_(size)(state, input, n_dims - 1);
  THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4, "gradInput must be contiguous");
  if (weights) {
    THCUNN_assertSameGPU(
      state, 5, weights, input, target, gradInput, total_weight
    );
  }
  else {
    THCUNN_assertSameGPU(
      state, 4, input, target, gradInput, total_weight
    );
  }
  THArgCheck(n_dims <= 2 && n_dims > 0, 2, "vector or matrix expected");
  long batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0);
  /*long num_targets = THCudaLongTensor_size(state, target, 0);
  THArgCheck(batch_size == num_targets,
      2, "mismatch between the batch size of input (%ld) and that of target (%ld)",
      batch_size, num_targets);*/
  if (weights && THCTensor_(nElement)(state, weights) != n_classes) {
    THError("weight tensor should be defined either for all or no classes");
  }
  weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL;
  target = THCTensor_(newContiguous)(state, target);
  real *weights_data = weights ? THCTensor_(data)(state, weights) : NULL;
  real *gradInput_data = THCTensor_(data)(state, gradInput);
  real *target_data = THCTensor_(data)(state, target);
  real *total_weight_data = THCTensor_(data)(state, total_weight);
  if (THCTensor_(nDimension)(state, input) == 1) {
    cunn_MyCrossEntropyCriterionGPU_updateGradInput_kernel1<real>
      <<<1, 1, 0, THCState_getCurrentStream(state)>>>(
        gradInput_data,
        weights_data,
        target_data,
        total_weight_data,
        sizeAverage,
        n_classes
    );
  } else {
    cunn_MyCrossEntropyCriterionGPU_updateGradInput_kernel<real>
      <<<1, NTHREADS, 0, THCState_getCurrentStream(state)>>>(
        gradInput_data,
        target_data,
        weights_data,
        total_weight_data,
        sizeAverage,
        THCTensor_(size)(state, input, 0),
        THCTensor_(size)(state, input, 1),
        n_classes
    );
  }
  THCudaCheck(cudaGetLastError());
  if (weights) {
    THCTensor_(free)(state, weights);
  }
  THCTensor_(free)(state, target);
}
#endif

4.torch/distro/extra/nn/init.lua

require('nn.MyCrossEntropyCriterionGPU')

5.torch/distro/extra/nn/lib/THNN/init.c里添加

#include "generic/MyCrossEntropyCriterionGPU.c"#include "THGenerateFloatTypes.h"

6.torch/distro/extra/nn/lib/THNN/generic/THNN.h里添加

TH_API void THNN_(MyCrossEntropyCriterionGPU_updateOutput)(          THNNState *state,            // library's state          THTensor *input,             // input tensor (1D/2D)          THTensor *target,            // tensor containing soft target          THTensor *output,            // [OUT] a one-element tensor with loss          bool sizeAverage,            // if true, the loss will be normalized by batch size and class weights          THTensor *weights,           // [OPTIONAL] class weights          THTensor *total_weight);     // [BUFFER]TH_API void THNN_(MyCrossEntropyCriterionGPU_updateGradInput)(          THNNState *state,            // library's state          THTensor *input,             // input tensor (1D/2D)          THTensor *target,            // tensor containing soft targets          THTensor *gradInput,         // [OUT] gradient w.r.t. input          bool sizeAverage,            // if true, the loss will be normalized by batch size and class weights          THTensor *weights,           // [OPTIONAL] class weights          THTensor *total_weight);     // [BUFFER]

7.torch/distro/extra/cunn/lib/THCUNN/generic/THCUNN.h里添加

TH_API void THNN_(MyCrossEntropyCriterionGPU_updateOutput)(          THCState *state,          THCTensor *input,          THCTensor *target,          THCTensor *output,          bool sizeAverage,          THCTensor *weights,          THCTensor *total_weight);TH_API void THNN_(MyCrossEntropyCriterionGPU_updateGradInput)(          THCState *state,          THCTensor *input,          THCTensor *target,          THCTensor *gradInput,          bool sizeAverage,          THCTensor *weights,          THCTensor *total_weight);

8.build

cd torch/distro/extra/nn
luarocks make rocks/nn-scm-1.rockspec
cd torch/distro/extra/cunn
luarocks make rocks/cunn-scm-1.rockspec

Test

test1-singlestep

-- GPU test1 (single step): sweeps maskzero x data_include_zero and
-- compares three criteria on CPU and GPU:
--   c1: nn.MyCrossEntropyCriterion (CPU, soft targets)
--   c2: nn.ClassNLLCriterion (hard targets, reference)
--   c3: nn.MyCrossEntropyCriterionGPU (C/CUDA backend, soft targets)
require 'nn'
require 'os'
require 'rnn'
require 'cutorch'
require 'cunn'
require 'MyCrossEntropyCriterion'
for maskzero = 0,1 do
for data_include_zero = 0,1 do
--maskzero = 1
--data_include_0 = 0
sizeAverage = true
--sizeAverage = false
--weight=torch.ones(3)
weight = torch.Tensor{0.1,0.2,0.4}
label = "soft"
--label="hard"
print("=======================================================")
print(string.format("use maskzero:%d. data includes all-zero:%d.", maskzero, data_include_zero))
print(string.format("sizeAverage:%s. label_type:%s", tostring(sizeAverage), label))
print("=======================================================")
if data_include_zero == 1 then
    y_ = torch.Tensor{{-1.20397280433,-2.30258509299,-0.51082562376},{-2.30258509299,-0.22314355131,-2.30258509299},{0,0,0}} --{log0.3,log0.1,log0.6},{log0.1,log0.8,log0.1}
    y_ = torch.Tensor{{1,2,3},{3,5,1},{0,0,0}}
    y_h = torch.Tensor{3,2,1}
    y_d = torch.Tensor{{0,0,1},{0,1,0},{1,0,0}} --y_s[3] can be any tensor
    -- NOTE(review): the 7 below looks like a typo for 0.7 — confirm
    y_s = torch.Tensor{{0.1,0.3,0.6},{0.2,7,0.1},{0,0,0}}
else
    y_ = torch.Tensor{{-1.20397280433,-2.30258509299,-0.51082562376},{-2.30258509299,-0.22314355131,-2.30258509299}} --{log0.3,log0.1,log0.6},{log0.1,log0.8,log0.1}
    y_ = torch.Tensor{{1,2,3},{3,5,1}}
    y_h = torch.Tensor{3,2}
    y_d = torch.Tensor{{0,0,1},{0,1,0}}
    y_s = torch.Tensor{{0.1,0.3,0.6},{0.2,0.7,0.1}}
end
-- BUGFIX: the original tested `maxzero` (nil — the loop variable is
-- `maskzero`), so the masked branch was never taken.
if maskzero == 1 then
    c1 = nn.MyCrossEntropyCriterion(weight)
    c1.maskZero = true
    c1.sizeAverage = sizeAverage
    c2_t = nn.ClassNLLCriterion(weight)
    c2_t.sizeAverage = sizeAverage
    c2 = nn.MaskZeroCriterion(c2_t, 1)
    c3_t = nn.MyCrossEntropyCriterionGPU(weight)
    c3_t.sizeAverage = sizeAverage
    c3 = nn.MaskZeroCriterion(c3_t, 1)
else
    c1 = nn.MyCrossEntropyCriterion(weight)
    c1.sizeAverage = sizeAverage
    c2 = nn.ClassNLLCriterion(weight)
    c2.sizeAverage = sizeAverage
    c3 = nn.MyCrossEntropyCriterionGPU(weight)
    c3.sizeAverage = sizeAverage
end
y_l = y_s
if label == "hard" then
    y_l = y_d
end
--cpu
print("cpu")
o1 = c1:forward(y_, y_l)
o2 = c2:forward(y_, y_h)
o3 = c3:forward(y_, y_l)
print(o1 - o3)
if label == "hard" then
    print(o2 - o1, o2 - o3)
end
o1b = c1:backward(y_, y_l)
o2b = c2:backward(y_, y_h)
o3b = c3:backward(y_, y_l)
print(o1b - o3b)
if label == "hard" then
    print(o2b - o1b, o2b - o3b)
end
--gpu
print("gpu")
c2_gpu = c2:clone()
c3_gpu = c3:clone()
c2_gpu:cuda()
c3_gpu:cuda()
o2_gpu = c2_gpu:forward(y_:cuda(), y_h:cuda())
o3_gpu = c3_gpu:forward(y_:cuda(), y_l:cuda())
print(o1 - o3_gpu)
if label == "hard" then
    print(o2_gpu - o3_gpu)
end
o2b_gpu = c2_gpu:backward(y_:cuda(), y_h:cuda())
o3b_gpu = c3_gpu:backward(y_:cuda(), y_l:cuda())
print(o1b - o3b_gpu:double())
if label == "hard" then
    print(o2b_gpu - o3b_gpu)
end
end
end

test2-sequence

-- GPU test2 (sequence): same three-way comparison as GPU test1 but over
-- a table of 3 time steps via rnn's SequencerCriterion, on CPU and GPU.
require 'nn'
require 'os'
require 'rnn'
require 'cutorch'
require 'cunn'
require 'MyCrossEntropyCriterion'

-- Convert a table of CUDA tensors to double tensors.
function CudaTabel2DoublTabel (table1)
local table2 = {}
for i = 1, #table1 do
        table2[i] = table1[i]:double()
end
return table2
end

-- Convert a table of double tensors to CUDA tensors.
function DoublTabel2CudaTabel (table1)
local table2 = {}
for i = 1, #table1 do
        table2[i] = table1[i]:cuda()
end
return table2
end

for maskzero = 0,1 do
for data_include_zero = 0,1 do
--maskzero = 1
--data_include_0 = 0
sizeAverage = true
--sizeAverage = false
--weight=torch.ones(3)
weight = torch.Tensor{0.1,0.2,0.4}
--label="soft"
label = "hard"
print("=======================================================")
print(string.format("use maskzero:%d. data includes all-zero:%d.", maskzero, data_include_zero))
print(string.format("sizeAverage:%s. label_type:%s", tostring(sizeAverage), label))
print("=======================================================")
if data_include_zero == 1 then
        y_1 = torch.Tensor{{1,2,3},{0,0,0},{0,0,0}}
        y_h1 = torch.Tensor{3,0,0}
        y_d1 = torch.Tensor{{0,0,1},{0,0,0},{0,0,0}}
        y_h1 = torch.Tensor{3,1,1}
        y_d1 = torch.Tensor{{0,0,1},{1,0,0},{1,0,0}}
        y_s1 = torch.Tensor{{0.1,0.2,0.7},{0,0,0},{0,0,0}}
        y_2 = torch.Tensor{{23,17,3},{2,41,7},{0,0,0}}
        y_h2 = torch.Tensor{1,2,0}
        y_d2 = torch.Tensor{{1,0,0},{0,1,0},{0,0,0}}
        y_h2 = torch.Tensor{1,2,3}
        y_d2 = torch.Tensor{{1,0,0},{0,1,0},{0,0,1}}
        y_s2 = torch.Tensor{{0.8,0.15,0.05},{0.02,0.9,0.18},{0,0,0}}
        y_3 = torch.Tensor{{11,32,1},{1,45,34},{12,2,6}}
        y_h3 = torch.Tensor{2,2,1}
        y_d3 = torch.Tensor{{0,1,0},{0,1,0},{1,0,0}}
        y_s3 = torch.Tensor{{0.25,0.7,0.05},{0.02,0.9,0.18},{0.6,0.1,0.3}}
else
        y_1 = torch.Tensor{{1,2,3},{14,12,3},{33,2,7}}
        y_h1 = torch.Tensor{3,1,1}
        y_d1 = torch.Tensor{{0,0,1},{1,0,0},{1,0,0}}
        y_s1 = torch.Tensor{{0.1,0.2,0.7},{0.6,0.2,0.2},{0.44,0.2,0.36}}
        y_2 = torch.Tensor{{23,17,3},{2,41,7},{8,23,51}}
        y_h2 = torch.Tensor{1,2,3}
        y_d2 = torch.Tensor{{1,0,0},{0,1,0},{0,0,1}}
        y_s2 = torch.Tensor{{0.8,0.15,0.05},{0.02,0.9,0.18},{0.2,0.3,0.5}}
        y_3 = torch.Tensor{{11,32,1},{1,45,34},{12,2,6}}
        y_h3 = torch.Tensor{2,2,1}
        y_d3 = torch.Tensor{{0,1,0},{0,1,0},{1,0,0}}
        y_s3 = torch.Tensor{{0.25,0.7,0.05},{0.02,0.9,0.18},{0.6,0.1,0.3}}
end
y_ = {}
y_d = {}
y_s = {}
y_h = {}
model = nn.MaskZero(nn.LogSoftMax(), 1)
-- BUGFIX: clone() each forward result — nn modules reuse their output
-- buffer, so without the clone all three entries of y_ would alias the
-- tensor produced by the LAST forward call.
table.insert(y_, model:forward(y_1):clone())
table.insert(y_d, y_d1)
table.insert(y_s, y_s1)
table.insert(y_h, y_h1)
table.insert(y_, model:forward(y_2):clone())
table.insert(y_d, y_d2)
table.insert(y_s, y_s2)
table.insert(y_h, y_h2)
table.insert(y_, model:forward(y_3):clone())
table.insert(y_d, y_d3)
table.insert(y_s, y_s3)
table.insert(y_h, y_h3)
seq = 3
-- BUGFIX: the original tested `maxzero` (nil — the loop variable is
-- `maskzero`), so the masked branch was never taken.
if maskzero == 1 then
    c1_single = nn.MyCrossEntropyCriterion(weight)
    c1_single.maskZero = true
    c1_single.sizeAverage = sizeAverage
    c2_t = nn.ClassNLLCriterion(weight)
    c2_t.sizeAverage = sizeAverage
    c2_single = nn.MaskZeroCriterion(c2_t, 1)
    c3_t = nn.MyCrossEntropyCriterionGPU(weight)
    c3_t.sizeAverage = sizeAverage
    c3_single = nn.MaskZeroCriterion(c3_t, 1)
else
    c1_single = nn.MyCrossEntropyCriterion(weight)
    c1_single.sizeAverage = sizeAverage
    c2_single = nn.ClassNLLCriterion(weight)
    c2_single.sizeAverage = sizeAverage
    c3_single = nn.MyCrossEntropyCriterionGPU(weight)
    c3_single.sizeAverage = sizeAverage
end
c1 = nn.SequencerCriterion(c1_single)
c2 = nn.SequencerCriterion(c2_single)
c3 = nn.SequencerCriterion(c3_single)
y_l = y_s
if label == "hard" then
    y_l = y_d
end
--cpu
print("cpu")
o1 = c1:forward(y_, y_l)
o2 = c2:forward(y_, y_h)
o3 = c3:forward(y_, y_l)
print(o1 - o3)
if label == "hard" then
    print(o2 - o1, o2 - o3)
end
o1b = c1:backward(y_, y_l)
o2b = c2:backward(y_, y_h)
o3b = c3:backward(y_, y_l)
for i = 1, seq do
    print(o1b[i] - o3b[i])
    if label == "hard" then
        print(o2b[i] - o1b[i], o2b[i] - o3b[i])
    end
end
--gpu
print("gpu")
c2_gpu = c2:clone()
c3_gpu = c3:clone()
c2_gpu:cuda()
c3_gpu:cuda()
o2_gpu = c2_gpu:forward(DoublTabel2CudaTabel(y_), DoublTabel2CudaTabel(y_h))
o3_gpu = c3_gpu:forward(DoublTabel2CudaTabel(y_), DoublTabel2CudaTabel(y_l))
print(o1 - o3_gpu)
if label == "hard" then
    print(o2_gpu - o3_gpu)
end
o2b_gpu = c2_gpu:backward(DoublTabel2CudaTabel(y_), DoublTabel2CudaTabel(y_h))
o3b_gpu = c3_gpu:backward(DoublTabel2CudaTabel(y_), DoublTabel2CudaTabel(y_l))
for i = 1, seq do
    print(o1b[i] - o3b_gpu[i]:double())
    if label == "hard" then
        print(o2b_gpu[i] - o3b_gpu[i])
    end
end
end
end
0 0
原创粉丝点击