batch slice layer
来源:互联网 发布:python抓取json数据 编辑:程序博客网 时间:2024/06/10 11:57
这个层的作用是将bottom分为k个tops
例如k为3,bottom为n1,n2,n3,n4,n5,n6。
则top1为n1,n4
则top2为n2,n5
则top3为n3,n6
batch_slice_layer.hpp
#ifndef CAFFE_BATCH_SLICE_LAYER_HPP_#define CAFFE_BATCH_SLICE_LAYER_HPP_#include "caffe/blob.hpp"#include "caffe/layer.hpp"#include "caffe/proto/caffe.pb.h"namespace caffe {template<typename Dtype>class BatchSliceLayer : public Layer<Dtype> {public: explicit BatchSliceLayer(const LayerParameter& param) : Layer<Dtype>(param) {} virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top); virtual void Reshape(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top); virtual inline const char* type() const { return "BatchSlice"; } virtual inline int ExactNumBottomBlobs() const { return 1; } virtual inline int MinTopBlobs() const { return 1; }protected: virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top); virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top); virtual void Backward_cpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); int count_; int slice_size_;};}//namespace#endif
batch_slice_layer.cpp
#include <vector>

#include "caffe/layers/batch_slice_layer.hpp"

namespace caffe {

// One-time setup: only validates the configuration. The derived sizes
// (count_, slice_size_) are computed in Reshape so they track any
// runtime change of the bottom batch size.
template <typename Dtype>
void BatchSliceLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(bottom[0]->num() % top.size(), 0)
      << "bottom num (" << bottom[0]->num()
      << ") must be divisible by the number of top blobs (" << top.size()
      << ")";
}

// Shapes each top as (num / K, C, H, W) where K is the number of tops.
// Caffe calls Reshape on every forward pass, so count_/slice_size_ are
// recomputed here (not in LayerSetUp) to stay correct if the batch size
// changes between iterations.
template <typename Dtype>
void BatchSliceLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(bottom[0]->num() % top.size(), 0)
      << "bottom num (" << bottom[0]->num()
      << ") must be divisible by the number of top blobs (" << top.size()
      << ")";
  count_ = bottom[0]->num() / top.size();
  std::vector<int> shape;
  shape.push_back(count_);
  shape.push_back(bottom[0]->channels());
  shape.push_back(bottom[0]->height());
  shape.push_back(bottom[0]->width());
  // Elements per batch item: C * H * W.
  slice_size_ = shape[1] * shape[2] * shape[3];
  const int top_num = top.size();
  for (int i = 0; i < top_num; ++i) {
    top[i]->Reshape(shape);
  }
}

// Scatters bottom batch items round-robin: top i gets bottom items
// i, i + K, i + 2K, ... where K is the number of top blobs.
template <typename Dtype>
void BatchSliceLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const int top_size = top.size();
  for (int i = 0; i < top_size; ++i) {
    Dtype* top_data = top[i]->mutable_cpu_data();
    for (int j = 0; j < count_; ++j) {
      const int top_offset = j * slice_size_;
      const int bottom_offset = (j * top_size + i) * slice_size_;
      caffe_copy(slice_size_, bottom_data + bottom_offset,
                 top_data + top_offset);
    }
  }
}

// Gathers the top diffs back into the bottom diff, inverting the
// round-robin scatter of Forward_cpu. Every bottom item appears in
// exactly one top, so plain copies (no accumulation) are correct.
template <typename Dtype>
void BatchSliceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // Honor the solver's request to skip gradient computation.
  if (!propagate_down[0]) { return; }
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  const int top_size = top.size();
  for (int i = 0; i < top_size; ++i) {
    const Dtype* top_diff = top[i]->cpu_diff();
    for (int j = 0; j < count_; ++j) {
      const int top_offset = j * slice_size_;
      const int bottom_offset = (j * top_size + i) * slice_size_;
      caffe_copy(slice_size_, top_diff + top_offset,
                 bottom_diff + bottom_offset);
    }
  }
}

#ifdef CPU_ONLY
STUB_GPU(BatchSliceLayer);
#endif

INSTANTIATE_CLASS(BatchSliceLayer);
REGISTER_LAYER_CLASS(BatchSlice);

}  // namespace caffe
batch_slice_layer.cu
#include "caffe/layers/batch_slice_layer.hpp"namespace caffe {template <typename Dtype>void BatchSliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); int top_size = top.size(); for(int i = 0; i < top_size; ++i){ Dtype* top_data = top[i]->mutable_gpu_data(); int top_offset, bottom_offset; for(int j = 0; j < count_; ++j){ top_offset = j*slice_size_; bottom_offset = (j*top_size+i)*slice_size_; caffe_copy(slice_size_, bottom_data + bottom_offset, top_data + top_offset); } }}template <typename Dtype>void BatchSliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); int top_size = top.size(); for(int i = 0; i < top_size; ++i){ const Dtype* top_diff = top[i]->gpu_diff(); int top_offset, bottom_offset; for(int j = 0; j < count_; ++j){ top_offset = j*slice_size_; bottom_offset = (j*top_size+i)*slice_size_; caffe_copy(slice_size_, top_diff + top_offset, bottom_diff + bottom_offset); } }}INSTANTIATE_LAYER_GPU_FUNCS(BatchSliceLayer);}//namespace caffe
0 0
- batch slice layer
- H264 slice layer
- caffe slice layer 学习
- caffe-slice layer
- Batch Normalization & Layer Normalization
- keras slice layer 层 实现
- caffe改进:prelu layer,cudnn batch norm layer,以及convolution depthwise separable layer
- Batch Normalization & Layer Normalization整理(代码实现下载)
- Deep Learning 1 : Batch Normalization,Weight Normalization and Layer Normalization
- slice
- slice
- [].slice
- slice
- slice
- slice
- slice
- Slice
- batch
- 周金涛:人生就是一次康波
- Network In Network笔记-ICLR 2014
- Tomcat 显示日志
- SSM框架重构达内NETCTOSS项目——(4)处理异常
- 检测点2.1
- batch slice layer
- subline Text3 安装 PackControl 报错解决方案
- Android内存优化(上)
- MySQL主主复制以及常见的一些问题
- 2663: [Beijing wc2012]灵魂宝石
- ssh使用的是GB2312编
- Android Studio发布项目到Maven私有库(Nexus)
- amp
- (四)静态库和动态库的生成和使用(windows(基于vs)和Linux(基于Ubuntu))