caffe自定义神经层

来源:互联网 发布:交易圣经知乎 编辑:程序博客网 时间:2024/05/01 20:37
一、步骤:

1. 创建新定义的头文件include/caffe/layers/my_neuron_layer.hpp
重写Layer名的方法:virtual inline const char*  type() const { return "MyNeuron"; }
如果只是需要cpu方法的话,可以注释掉forward/backward_gpu()这两个方法

my_neuron_layer.hpp
#ifndef CAFFE_MY_NEURON_LAYER_HPP_
#define CAFFE_MY_NEURON_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/layers/neuron_layer.hpp"

namespace caffe {

// Element-wise power layer: computes top = bottom ^ power, where `power`
// is read from the MyNeuronParameter message in the net prototxt.
template <typename Dtype>
class MyNeuronLayer : public NeuronLayer<Dtype> {
 public:
  explicit MyNeuronLayer(const LayerParameter& param)
      : NeuronLayer<Dtype>(param) {}

  // Reads the layer-specific `power` parameter from the prototxt.
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
                          const vector<Blob<Dtype>*>& top);

  // Layer type string used for registration / prototxt `type:` field.
  virtual inline const char* type() const { return "MyNeuron"; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
                           const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
                            const vector<bool>& propagate_down,
                            const vector<Blob<Dtype>*>& bottom);

  // BUG FIX: the original "commented out" the GPU declarations with '#',
  // which is an invalid preprocessor directive and breaks compilation.
  // For a CPU-only layer, keep them commented with '//' as below; to enable
  // GPU support, uncomment them and provide my_neuron_layer.cu.
  // virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
  //                          const vector<Blob<Dtype>*>& top);
  // virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
  //                           const vector<bool>& propagate_down,
  //                           const vector<Blob<Dtype>*>& bottom);

  Dtype power_;  // exponent read from my_neuron_param().power()
};

}  // namespace caffe

#endif  // CAFFE_MY_NEURON_LAYER_HPP_




2. 创建对应src/caffe/layers/my_neuron_layer.cpp的源文件
重写方法LayerSetUp,实现能从prototxt读取参数
重写方法Reshape,如果对继承类没有修改的话,就不需要重写
重写方法Forward_cpu
重写方法Backward_cpu(非必须)
*如果要GPU支持则还需要创建src/caffe/layers/my_neuron_layer.cu,同理重写方法Forward_gpu/Backward_gpu(非必须)

my_neuron_layer.cpp
#include <vector>

#include "caffe/layers/my_neuron_layer.hpp"  // declaration of the custom layer
#include "caffe/util/math_functions.hpp"

namespace caffe {

// Parse the layer's `power` exponent from the prototxt parameters.
template <typename Dtype>
void MyNeuronLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
                                      const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  power_ = this->layer_param_.my_neuron_param().power();
}

// Forward pass: top = bottom ^ power, element-wise.
template <typename Dtype>
void MyNeuronLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
                                       const vector<Blob<Dtype>*>& top) {
  const int num_elems = bottom[0]->count();
  caffe_powx(num_elems, bottom[0]->cpu_data(), Dtype(power_),
             top[0]->mutable_cpu_data());
}

// Backward pass (chain rule): dE/dx = dE/dy * power * x^(power - 1).
template <typename Dtype>
void MyNeuronLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
                                        const vector<bool>& propagate_down,
                                        const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  const int num_elems = top[0]->count();
  Dtype* grad = bottom[0]->mutable_cpu_diff();
  // grad = x^(power - 1)
  caffe_powx(num_elems, bottom[0]->cpu_data(), Dtype(power_ - 1), grad);
  // grad *= power
  caffe_scal(num_elems, Dtype(power_), grad);
  // grad *= dE/dy
  caffe_mul(num_elems, grad, top[0]->cpu_diff(), grad);
}

// #ifdef CPU_ONLY
// STUB_GPU(MyNeuronLayer);
// #endif

INSTANTIATE_CLASS(MyNeuronLayer);
REGISTER_LAYER_CLASS(MyNeuron);

}  // namespace caffe


my_neuron_layer.cu

#include <vector>

// BUG FIX: the original included "caffe/layers/mysquare_layer.hpp" (left over
// from another tutorial), which does not declare MyNeuronLayer and therefore
// cannot compile. Include this layer's own header instead.
#include "caffe/layers/my_neuron_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

// GPU forward pass: top = bottom ^ power, element-wise.
template <typename Dtype>
void MyNeuronLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const int count = top[0]->count();
  caffe_gpu_powx(count, bottom[0]->gpu_data(), Dtype(power_),
                 top[0]->mutable_gpu_data());
}

// GPU backward pass (chain rule): dE/dx = dE/dy * power * x^(power - 1).
// NOTE: the original interleaved cpu_data()/cpu_diff() reads and cout debug
// prints between the GPU kernels; each such read forces a device->host
// synchronization on every backward pass. Those debug leftovers (and the
// file-scope `using namespace std;`) are removed here.
template <typename Dtype>
void MyNeuronLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  const int count = top[0]->count();
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  // bottom_diff = x^(power - 1)
  caffe_gpu_powx(count, bottom_data, Dtype(power_ - 1), bottom_diff);
  // bottom_diff *= power
  caffe_gpu_scal(count, Dtype(power_), bottom_diff);
  // bottom_diff *= dE/dy
  caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
}

INSTANTIATE_LAYER_GPU_FUNCS(MyNeuronLayer);

}  // namespace caffe


3. proto/caffe.proto注册新的Layer

message LayerParameter{

   ...

   ++ optional MyNeuronParameter my_neuron_param = 150;

   ...

}


...


++ message MyNeuronParameter {

   ++  optional float power = 1 [default = 2];

   }

... 


message V1LayerParameter{

   ...

   ++ MYNEURON = 40;

   ...

}


4. my_neuron_layer.cpp添加注册的宏定义

INSTANTIATE_CLASS(MyNeuronLayer);

REGISTER_LAYER_CLASS(MyNeuron);

如果有my_neuron_layer.cu,则添加

INSTANTIATE_LAYER_GPU_FUNCS(MyNeuronLayer);


5. 重新编译和install
# cd caffe/build/
# cmake -D CPU_ONLY=ON -D CMAKE_INSTALL_PREFIX=/usr/local ..

# make all

# make install


二、测试自定义的Layer

定义deploy.prototxt

name: "CaffeNet"

input: "data"

# Caffe blob dimension order is N x C x H x W.
input_shape {

 dim: 1 # batch size

 dim: 1 # channels (1 = single channel, e.g. grayscale MNIST; the original
        # comment said "rgb", which would require dim: 3)

 dim: 28 # height

 dim: 28 # width

}

# The custom layer under test: data_out = data ^ power (element-wise).
layer {

 name: "myneuron"

 type: "MyNeuron"

 bottom: "data"

 top: "data_out"

 my_neuron_param {

    power : 2

 }

}


运行测试程序:
test_my_neuron.py

原创粉丝点击