caffe自定义神经层
来源:互联网 发布:交易圣经知乎 编辑:程序博客网 时间:2024/05/01 20:37
#ifndef CAFFE_MY_NEURON_LAYER_HPP_
#define CAFFE_MY_NEURON_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/layers/neuron_layer.hpp"

namespace caffe {

// Element-wise power layer: computes top = bottom ^ power_, where the
// exponent is read from my_neuron_param in the layer prototxt.
template <typename Dtype>
class MyNeuronLayer : public NeuronLayer<Dtype> {
 public:
  // Reads power_ from the layer parameter; shape setup is delegated to
  // the NeuronLayer base class.
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual inline const char* type() const { return "MyNeuron"; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // Declared here because my_neuron_layer.cu defines them; without these
  // declarations the .cu definitions fail to compile.
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  Dtype power_;  // exponent; default is 2 (see MyNeuronParameter in caffe.proto)
};

}  // namespace caffe

#endif  // CAFFE_MY_NEURON_LAYER_HPP_
#include "caffe/layers/my_neuron_layer.hpp" //需要包含新定义的头文件
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
template <typename Dtype>
void MyNeuronLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Let the base NeuronLayer perform the standard setup first.
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  // Cache the exponent from the prototxt so Forward/Backward do not need
  // to re-read the parameter message on every pass.
  const MyNeuronParameter& my_param = this->layer_param_.my_neuron_param();
  power_ = my_param.power();
}
// Compute y = x^power
// CPU forward pass: top = bottom ^ power_, applied element-wise.
template <typename Dtype>
void MyNeuronLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int num_elements = bottom[0]->count();
  const Dtype* in_data = bottom[0]->cpu_data();
  Dtype* out_data = top[0]->mutable_cpu_data();
  caffe_powx(num_elements, in_data, Dtype(power_), out_data);
}
// CPU backward pass. For y = x^p the gradient is dy/dx = p * x^(p-1), so
// bottom_diff = power_ * bottom_data^(power_-1) * top_diff (chain rule).
template <typename Dtype>
void MyNeuronLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;  // no layer below requested a gradient
  }
  const int num_elements = top[0]->count();
  const Dtype* grad_out = top[0]->cpu_diff();
  const Dtype* in_data = bottom[0]->cpu_data();
  Dtype* grad_in = bottom[0]->mutable_cpu_diff();
  // grad_in <- x^(p-1)
  caffe_powx(num_elements, in_data, Dtype(power_ - 1), grad_in);
  // grad_in <- p * x^(p-1)
  caffe_scal(num_elements, Dtype(power_), grad_in);
  // grad_in <- p * x^(p-1) * top_diff
  caffe_mul(num_elements, grad_in, grad_out, grad_in);
}
// STUB_GPU would define Forward_gpu/Backward_gpu as NOT_IMPLEMENTED under
// CPU_ONLY builds; it is kept disabled here because my_neuron_layer.cu
// provides real GPU implementations.
//#ifdef CPU_ONLY
//STUB_GPU(MyNeuronLayer);
//#endif
// Instantiate the template for float and double, and register the layer so
// that type: "MyNeuron" in a prototxt resolves to this class via the factory.
INSTANTIATE_CLASS(MyNeuronLayer);
REGISTER_LAYER_CLASS(MyNeuron);
}
my_neuron_layer.cu
#include <vector>
#include <iostream>

// Bug fix: the original included "caffe/layers/mysquare_layer.hpp" — a header
// copied from a different tutorial. This .cu implements MyNeuronLayer, so it
// must include its own header (as the article itself states earlier).
#include "caffe/layers/my_neuron_layer.hpp"
#include "caffe/util/math_functions.hpp"

// NOTE(review): file-scope using-directives are discouraged; kept only
// because the debug cout/endl calls below rely on it.
using namespace std;
namespace caffe {
// GPU forward pass: top = bottom ^ power_, applied element-wise on device.
template <typename Dtype>
void MyNeuronLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const Dtype* in_data = bottom[0]->gpu_data();
  Dtype* out_data = top[0]->mutable_gpu_data();
  caffe_gpu_powx(top[0]->count(), in_data, Dtype(power_), out_data);
}
// GPU backward pass. For y = x^p the gradient is dy/dx = p * x^(p-1), so
// bottom_diff = power_ * bottom_data^(power_-1) * top_diff (chain rule).
//
// Cleanup: the original interleaved the math with debug cout dumps that read
// bottom[0]->cpu_data()/cpu_diff() between every kernel call. Each such read
// forces a device-to-host copy and synchronization, and the pointers were
// also redundantly re-fetched after every step. The debug output is removed
// and each pointer is fetched once.
template <typename Dtype>
void MyNeuronLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;  // no layer below requested a gradient
  }
  const int count = top[0]->count();
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  // bottom_diff <- x^(p-1)
  caffe_gpu_powx(count, bottom_data, Dtype(power_ - 1), bottom_diff);
  // bottom_diff <- p * x^(p-1)
  caffe_gpu_scal(count, Dtype(power_), bottom_diff);
  // bottom_diff <- p * x^(p-1) * top_diff
  caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(MyNeuronLayer);
}
message LayerParameter{
...
++ optional MyNeuronParameter my_neuron_param = 150;
...
}
...
++ message MyNeuronParameter {
++ optional float power = 1 [default = 2];
}
...
message V1LayerParameter{
...
++ MYNEURON = 40;
...
}
INSTANTIATE_CLASS(MyNeuronLayer);
REGISTER_LAYER_CLASS(MyNeuron);
如果有my_neuron_layer.cu,则添加
INSTANTIATE_LAYER_GPU_FUNCS(MyNeuronLayer);
# make all
# make install
二、测试自定义的Layer
name: "CaffeNet"
input: "data"
input_shape {
dim: 1 # batch size
dim: 1 # channels — a single channel here (the original "rgb" comment was wrong for dim: 1)
dim: 28 # height — Caffe blob order is N x C x H x W, so height comes before width
dim: 28 # width
}
layer {
name: "myneuron"
type: "MyNeuron"
bottom: "data"
top: "data_out"
my_neuron_param {
power : 2 # data_out = data ^ 2
}
}
test_my_neuron.py
- caffe自定义神经层
- 【深度学习框架Caffe学习与应用】第五课 自定义神经层和数据输入层
- caffe自定义层
- caffe自定义Neuron层
- Caffe 自定义数据输入层
- Caffe添加自定义的层
- Caffe添加自定义层-自定义loss
- tensorflow 建造神经层
- 如何在caffe中自定义网络层
- caffe源码学习(六) 自定义层
- 如何在caffe中自定义网络层
- 在caffe中添加自定义层
- caffe 10 win10 使用python自定义层
- caffe之SoftmaxWithLoss层 自定义实现
- <Learning Transferable Features with Deep Adaptation Networks>caffe 添加MMDLoss层(caffe 自定义网络层)
- Caffe学习5-用C++自定义层以及可视化结果
- caffe 自定义层/添加自己的损失函数
- (16)caffe总结之自定义数据输入层
- 404,500等HTTP状态码表示的含义
- Oracle学前班
- Linux功耗管理(7)_Wakeup events framework
- HDU6128Inverse of sum
- Oracle数据库exp与expdp方式导入导出时改变数据库表空间及用户信息
- caffe自定义神经层
- codeforces 24C 找规律
- Python 字符串函数
- VS2010/MFC MFC 常用类:定时器 Timer
- 「网络流 24 题」运输问题
- 任务进度规划(每日更新)
- 73. Set Matrix Zeroes
- setsockopt(server_sockfd,SOL_SOCKET,SO_REUSEADDR,&j,sizeof(j));
- 编译原理工具推荐