batch slice layer


This layer splits the bottom blob into k top blobs by interleaving along the batch dimension.
For example, with k = 3 and a bottom batch of n1, n2, n3, n4, n5, n6:
top1 receives n1, n4
top2 receives n2, n5
top3 receives n3, n6
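
The index arithmetic behind this interleaving is simple: bottom sample b goes to top (b mod k) at position (b div k), and conversely sample j of top i comes from bottom sample j*k + i. The following standalone sketch (not part of the layer code; k and batch are illustrative values) prints the mapping for the example above:

#include <cstdio>

int main() {
  const int k = 3;      // number of top blobs
  const int batch = 6;  // bottom batch size, must be divisible by k
  for (int b = 0; b < batch; ++b) {
    // bottom sample b lands in top (b % k), at position (b / k)
    std::printf("bottom n%d -> top%d[%d]\n", b + 1, b % k + 1, b / k);
  }
  return 0;
}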
batch_slice_layer.hpp

#ifndef CAFFE_BATCH_SLICE_LAYER_HPP_
#define CAFFE_BATCH_SLICE_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

template <typename Dtype>
class BatchSliceLayer : public Layer<Dtype> {
 public:
  explicit BatchSliceLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "BatchSlice"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int MinTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  int count_;       // samples per top blob: bottom num / number of tops
  int slice_size_;  // elements per sample: channels * height * width
};

}  // namespace caffe

#endif  // CAFFE_BATCH_SLICE_LAYER_HPP_

batch_slice_layer.cpp

#include "caffe/layers/batch_slice_layer.hpp"#include <vector>namespace caffe {template <typename Dtype>void BatchSliceLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,      const vector<Blob<Dtype>*>& top) {    CHECK_EQ(bottom[0]->num()%top.size(), 0)            <<"error!";    count_ = bottom[0]->num()/top.size();}template <typename Dtype>void BatchSliceLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,      const vector<Blob<Dtype>*>& top) {    std::vector<int> shape;    shape.push_back(count_);    shape.push_back(bottom[0]->channels());    shape.push_back(bottom[0]->height());    shape.push_back(bottom[0]->width());    slice_size_ = shape[1]*shape[2]*shape[3];    int top_num = top.size();    for(int i = 0; i < top_num; ++i){        top[i]->Reshape(shape);    }}template <typename Dtype>void BatchSliceLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,      const vector<Blob<Dtype>*>& top) {    const Dtype* bottom_data = bottom[0]->cpu_data();    int top_size = top.size();    for(int i = 0; i < top_size; ++i){        Dtype* top_data = top[i]->mutable_cpu_data();        int top_offset, bottom_offset;        for(int j = 0; j < count_; ++j){            top_offset = j*slice_size_;            bottom_offset = (j*top_size+i)*slice_size_;            caffe_copy(slice_size_,                    bottom_data + bottom_offset, top_data + top_offset);        }    }}template <typename Dtype>void BatchSliceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();    int top_size = top.size();    for(int i = 0; i < top_size; ++i){        const Dtype* top_diff = top[i]->cpu_diff();        int top_offset, bottom_offset;        for(int j = 0; j < count_; ++j){            top_offset = j*slice_size_;            bottom_offset = (j*top_size+i)*slice_size_;            caffe_copy(slice_size_,                    top_diff + top_offset, bottom_diff + bottom_offset);        }    }}#ifdef CPU_ONLYSTUB_GPU(BatchSliceLayer);#endifINSTANTIATE_CLASS(BatchSliceLayer);REGISTER_LAYER_CLASS(BatchSlice);}//namespace caffe

batch_slice_layer.cu

#include "caffe/layers/batch_slice_layer.hpp"namespace caffe {template <typename Dtype>void BatchSliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,      const vector<Blob<Dtype>*>& top) {    const Dtype* bottom_data = bottom[0]->gpu_data();    int top_size = top.size();    for(int i = 0; i < top_size; ++i){        Dtype* top_data = top[i]->mutable_gpu_data();        int top_offset, bottom_offset;        for(int j = 0; j < count_; ++j){            top_offset = j*slice_size_;            bottom_offset = (j*top_size+i)*slice_size_;            caffe_copy(slice_size_,                    bottom_data + bottom_offset, top_data + top_offset);        }    }}template <typename Dtype>void BatchSliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();    int top_size = top.size();    for(int i = 0; i < top_size; ++i){        const Dtype* top_diff = top[i]->gpu_diff();        int top_offset, bottom_offset;        for(int j = 0; j < count_; ++j){            top_offset = j*slice_size_;            bottom_offset = (j*top_size+i)*slice_size_;            caffe_copy(slice_size_,                    top_diff + top_offset, bottom_diff + bottom_offset);        }    }}INSTANTIATE_LAYER_GPU_FUNCS(BatchSliceLayer);}//namespace caffe