[Deep Learning] Caffe's Pooling Layer
PoolingLayer
This post walks through the CPU implementation of Caffe's PoolingLayer (src/caffe/layers/pooling_layer.cpp), method by method: LayerSetUp, Reshape, Forward_cpu, and Backward_cpu.
LayerSetUp
// LayerSetUp mainly validates and initializes the pooling kernel, pad, and stride.
template <typename Dtype>
void PoolingLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  PoolingParameter pool_param = this->layer_param_.pooling_param();  // fetch the pooling parameters
  if (pool_param.global_pooling()) {  // global pooling?
    CHECK(!(pool_param.has_kernel_size() ||
      pool_param.has_kernel_h() || pool_param.has_kernel_w()))
      << "With Global_pooling: true Filter size cannot specified";
  } else {
    CHECK(!pool_param.has_kernel_size() !=
      !(pool_param.has_kernel_h() && pool_param.has_kernel_w()))
      << "Filter size is kernel_size OR kernel_h and kernel_w; not both";
    CHECK(pool_param.has_kernel_size() ||
      (pool_param.has_kernel_h() && pool_param.has_kernel_w()))
      << "For non-square filters both kernel_h and kernel_w are required.";
  }
  CHECK((!pool_param.has_pad() && pool_param.has_pad_h()
      && pool_param.has_pad_w())
      || (!pool_param.has_pad_h() && !pool_param.has_pad_w()))
      << "pad is pad OR pad_h and pad_w are required.";
  CHECK((!pool_param.has_stride() && pool_param.has_stride_h()
      && pool_param.has_stride_w())
      || (!pool_param.has_stride_h() && !pool_param.has_stride_w()))
      << "Stride is stride OR stride_h and stride_w are required.";
  global_pooling_ = pool_param.global_pooling();
  if (global_pooling_) {  // with global pooling, the kernel covers the whole bottom blob
    kernel_h_ = bottom[0]->height();
    kernel_w_ = bottom[0]->width();
  } else {
    if (pool_param.has_kernel_size()) {  // a single kernel_size sets both dimensions
      kernel_h_ = kernel_w_ = pool_param.kernel_size();
    } else {  // otherwise kernel_h_ and kernel_w_ are set separately
      kernel_h_ = pool_param.kernel_h();
      kernel_w_ = pool_param.kernel_w();
    }
  }
  CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero.";
  CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero.";
  if (!pool_param.has_pad_h()) {  // pad given as one value or as pad_h/pad_w
    pad_h_ = pad_w_ = pool_param.pad();
  } else {
    pad_h_ = pool_param.pad_h();
    pad_w_ = pool_param.pad_w();
  }
  if (!pool_param.has_stride_h()) {  // stride given as one value or as stride_h/stride_w
    stride_h_ = stride_w_ = pool_param.stride();
  } else {
    stride_h_ = pool_param.stride_h();
    stride_w_ = pool_param.stride_w();
  }
  if (global_pooling_) {
    CHECK(pad_h_ == 0 && pad_w_ == 0 && stride_h_ == 1 && stride_w_ == 1)
      << "With Global_pooling: true; only pad = 0 and stride = 1";
  }
  if (pad_h_ != 0 || pad_w_ != 0) {
    CHECK(this->layer_param_.pooling_param().pool()
        == PoolingParameter_PoolMethod_AVE
        || this->layer_param_.pooling_param().pool()
        == PoolingParameter_PoolMethod_MAX)
        << "Padding implemented only for average and max pooling.";
    CHECK_LT(pad_h_, kernel_h_);
    CHECK_LT(pad_w_, kernel_w_);
  }
}
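The net effect of these checks is a simple precedence: global_pooling overrides any explicit kernel (and forces pad = 0, stride = 1), a single kernel_size expands to a square kernel, and pad/stride fall back to their defaults of 0 and 1 when not given per axis. A minimal standalone sketch of that precedence (PoolShape and resolve_pool_shape are hypothetical names for illustration, not Caffe API):

#include <cstdio>

struct PoolShape { int kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w; };

// Resolve the same precedence LayerSetUp enforces above.
// (Hypothetical helper, not part of Caffe.)
PoolShape resolve_pool_shape(bool global_pooling, int bottom_h, int bottom_w,
                             int kernel_size, int pad = 0, int stride = 1) {
  if (global_pooling) {
    // kernel spans the whole input; LayerSetUp insists pad == 0, stride == 1
    return {bottom_h, bottom_w, 0, 0, 1, 1};
  }
  // a single kernel_size yields a square kernel; pad and stride apply to both axes
  return {kernel_size, kernel_size, pad, pad, stride, stride};
}

int main() {
  PoolShape s = resolve_pool_shape(false, 13, 13, 3, 0, 2);  // 3x3 kernel, stride 2
  std::printf("kernel %dx%d pad %d stride %d\n",
              s.kernel_h, s.kernel_w, s.pad_h, s.stride_h);
  PoolShape g = resolve_pool_shape(true, 13, 13, 0);  // global pooling over 13x13
  std::printf("global kernel %dx%d\n", g.kernel_h, g.kernel_w);
  return 0;
}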
Reshape
template <typename Dtype>
void PoolingLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, "
      << "corresponding to (num, channels, height, width)";
  channels_ = bottom[0]->channels();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  if (global_pooling_) {  // with global pooling, the kernel tracks the bottom size
    kernel_h_ = bottom[0]->height();
    kernel_w_ = bottom[0]->width();
  }
  // output height after padding
  pooled_height_ = static_cast<int>(ceil(static_cast<float>(
      height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1;
  // output width after padding
  pooled_width_ = static_cast<int>(ceil(static_cast<float>(
      width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1;
  if (pad_h_ || pad_w_) {
    // If we have padding, ensure that the last pooling starts strictly
    // inside the image (instead of at the padding); otherwise clip the last.
    if ((pooled_height_ - 1) * stride_h_ >= height_ + pad_h_) {
      --pooled_height_;
    }
    if ((pooled_width_ - 1) * stride_w_ >= width_ + pad_w_) {
      --pooled_width_;
    }
    CHECK_LT((pooled_height_ - 1) * stride_h_, height_ + pad_h_);
    CHECK_LT((pooled_width_ - 1) * stride_w_, width_ + pad_w_);
  }
  // top shape is num x channels x pooled_height_ x pooled_width_
  top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_,
      pooled_width_);
  if (top.size() > 1) {
    top[1]->ReshapeLike(*top[0]);
  }
  // If max pooling, we will initialize the vector index part.
  if (this->layer_param_.pooling_param().pool() ==
      PoolingParameter_PoolMethod_MAX && top.size() == 1) {
    max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_,
        pooled_width_);
  }
  // If stochastic pooling, we will initialize the random index part.
  if (this->layer_param_.pooling_param().pool() ==
      PoolingParameter_PoolMethod_STOCHASTIC) {
    rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_,
        pooled_width_);
  }
}
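Note that pooling uses ceil here, whereas convolution uses floor, so a pooling window may hang partially over the border; the clip afterwards keeps the last window from starting entirely inside the padding. A standalone sketch of the same rule with a worked example (pooled_dim is a hypothetical helper name, not Caffe API):

#include <cmath>
#include <cstdio>

// Same ceil-based output-size rule as Reshape above, including the clip
// that prevents the last window from starting inside the padding.
int pooled_dim(int in, int pad, int kernel, int stride) {
  int pooled = static_cast<int>(
      std::ceil(static_cast<float>(in + 2 * pad - kernel) / stride)) + 1;
  if (pad > 0 && (pooled - 1) * stride >= in + pad) {
    --pooled;  // the last window would start entirely in the padding: drop it
  }
  return pooled;
}

int main() {
  // e.g. AlexNet pool1: 55x55 input, 3x3 kernel, stride 2, no pad -> 27x27
  std::printf("%d\n", pooled_dim(55, 0, 3, 2));  // prints 27
  // With padding the window can hang over the border: ceil((4+2-2)/2)+1 = 3
  std::printf("%d\n", pooled_dim(4, 1, 2, 2));   // prints 3
  return 0;
}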
Forward_cpu
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int top_count = top[0]->count();  // total number of output elements
  // We'll output the mask to top[1] if it's of size >1.
  const bool use_top_mask = top.size() > 1;
  int* mask = NULL;  // suppress warnings about uninitialized variables
  Dtype* top_mask = NULL;
  // Different pooling methods. We explicitly do the switch outside the for
  // loop to save time, although this results in more code.
  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:  // max pooling
    // Initialize
    if (use_top_mask) {
      top_mask = top[1]->mutable_cpu_data();
      caffe_set(top_count, Dtype(-1), top_mask);
    } else {
      mask = max_idx_.mutable_cpu_data();
      caffe_set(top_count, -1, mask);  // start every mask entry at -1
    }
    caffe_set(top_count, Dtype(-FLT_MAX), top_data);  // start the output at the smallest value
    // The main loop
    for (int n = 0; n < bottom[0]->num(); ++n) {
      for (int c = 0; c < channels_; ++c) {
        // each output point maps back to a kernel-sized window of the input
        for (int ph = 0; ph < pooled_height_; ++ph) {
          for (int pw = 0; pw < pooled_width_; ++pw) {
            int hstart = ph * stride_h_ - pad_h_;
            int wstart = pw * stride_w_ - pad_w_;
            int hend = min(hstart + kernel_h_, height_);
            int wend = min(wstart + kernel_w_, width_);
            hstart = max(hstart, 0);
            wstart = max(wstart, 0);
            const int pool_index = ph * pooled_width_ + pw;
            for (int h = hstart; h < hend; ++h) {  // take the max over the window
              for (int w = wstart; w < wend; ++w) {
                const int index = h * width_ + w;
                if (bottom_data[index] > top_data[pool_index]) {
                  top_data[pool_index] = bottom_data[index];
                  if (use_top_mask) {
                    top_mask[pool_index] = static_cast<Dtype>(index);
                  } else {
                    mask[pool_index] = index;  // mask records where the window's maximum sits
                  }
                }
              }
            }
          }
        }
        // compute offset: one channel is pooled, advance to the next
        bottom_data += bottom[0]->offset(0, 1);
        top_data += top[0]->offset(0, 1);
        if (use_top_mask) {
          top_mask += top[0]->offset(0, 1);
        } else {
          mask += top[0]->offset(0, 1);
        }
      }
    }
    break;
  case PoolingParameter_PoolMethod_AVE:  // average pooling
    for (int i = 0; i < top_count; ++i) {  // first zero the output
      top_data[i] = 0;
    }
    // The main loop (same traversal as above)
    for (int n = 0; n < bottom[0]->num(); ++n) {
      for (int c = 0; c < channels_; ++c) {
        for (int ph = 0; ph < pooled_height_; ++ph) {
          for (int pw = 0; pw < pooled_width_; ++pw) {
            int hstart = ph * stride_h_ - pad_h_;
            int wstart = pw * stride_w_ - pad_w_;
            int hend = min(hstart + kernel_h_, height_ + pad_h_);
            int wend = min(wstart + kernel_w_, width_ + pad_w_);
            int pool_size = (hend - hstart) * (wend - wstart);
            hstart = max(hstart, 0);
            wstart = max(wstart, 0);
            hend = min(hend, height_);
            wend = min(wend, width_);
            for (int h = hstart; h < hend; ++h) {  // accumulate over the window
              for (int w = wstart; w < wend; ++w) {
                top_data[ph * pooled_width_ + pw] +=
                    bottom_data[h * width_ + w];
              }
            }
            top_data[ph * pooled_width_ + pw] /= pool_size;  // divide by the window size
          }
        }
        // compute offset, as above
        bottom_data += bottom[0]->offset(0, 1);
        top_data += top[0]->offset(0, 1);
      }
    }
    break;
  case PoolingParameter_PoolMethod_STOCHASTIC:
    NOT_IMPLEMENTED;
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
}
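To make the max-pooling branch concrete, here is a minimal standalone sketch (not Caffe code) of the inner loop on a 4x4 single-channel input with a 2x2 kernel, stride 2, and no padding; mask stores the flat input index of each window's maximum, exactly as max_idx_ does above:

#include <cfloat>
#include <cstdio>

int main() {
  const int H = 4, W = 4, K = 2, S = 2;
  const int PH = (H - K) / S + 1, PW = (W - K) / S + 1;  // 2x2 output
  float bottom[H * W] = { 1, 3, 2, 0,
                          4, 2, 1, 5,
                          0, 1, 3, 2,
                          2, 0, 1, 4 };
  float top[PH * PW];
  int mask[PH * PW];
  for (int ph = 0; ph < PH; ++ph) {
    for (int pw = 0; pw < PW; ++pw) {
      float best = -FLT_MAX;
      int best_idx = -1;
      for (int h = ph * S; h < ph * S + K; ++h) {    // scan one 2x2 window
        for (int w = pw * S; w < pw * S + K; ++w) {
          const int idx = h * W + w;
          if (bottom[idx] > best) { best = bottom[idx]; best_idx = idx; }
        }
      }
      top[ph * PW + pw] = best;
      mask[ph * PW + pw] = best_idx;  // flat input index of the maximum
    }
  }
  for (int i = 0; i < PH * PW; ++i)
    std::printf("top=%g mask=%d\n", top[i], mask[i]);
  return 0;
}

Running it gives top = {4, 5, 2, 4} and mask = {4, 7, 12, 15}; this mask is exactly what Backward_cpu uses to route gradients.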
Backward_cpu
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }
  const Dtype* top_diff = top[0]->cpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  // Different pooling methods. We explicitly do the switch outside the for
  // loop to save time, although this results in more code.
  caffe_set(bottom[0]->count(), Dtype(0), bottom_diff);  // first zero bottom_diff
  // We'll output the mask to top[1] if it's of size >1.
  const bool use_top_mask = top.size() > 1;
  const int* mask = NULL;  // suppress warnings about uninitialized variables
  const Dtype* top_mask = NULL;
  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:  // max pooling
    // The main loop
    if (use_top_mask) {
      top_mask = top[1]->cpu_data();
    } else {
      mask = max_idx_.cpu_data();
    }
    for (int n = 0; n < top[0]->num(); ++n) {
      for (int c = 0; c < channels_; ++c) {
        for (int ph = 0; ph < pooled_height_; ++ph) {
          for (int pw = 0; pw < pooled_width_; ++pw) {
            const int index = ph * pooled_width_ + pw;
            const int bottom_index =
                use_top_mask ? top_mask[index] : mask[index];
            // accumulate top_diff into the bottom_diff entry of the window's maximum
            bottom_diff[bottom_index] += top_diff[index];
          }
        }
        // repeat for every channel
        bottom_diff += bottom[0]->offset(0, 1);
        top_diff += top[0]->offset(0, 1);
        if (use_top_mask) {
          top_mask += top[0]->offset(0, 1);
        } else {
          mask += top[0]->offset(0, 1);
        }
      }
    }
    break;
  case PoolingParameter_PoolMethod_AVE:
    // The main loop
    for (int n = 0; n < top[0]->num(); ++n) {
      for (int c = 0; c < channels_; ++c) {
        for (int ph = 0; ph < pooled_height_; ++ph) {
          for (int pw = 0; pw < pooled_width_; ++pw) {
            // the input window corresponding to one output point
            int hstart = ph * stride_h_ - pad_h_;
            int wstart = pw * stride_w_ - pad_w_;
            int hend = min(hstart + kernel_h_, height_ + pad_h_);
            int wend = min(wstart + kernel_w_, width_ + pad_w_);
            int pool_size = (hend - hstart) * (wend - wstart);
            hstart = max(hstart, 0);
            wstart = max(wstart, 0);
            hend = min(hend, height_);
            wend = min(wend, width_);
            // spread top_diff / pool_size over the whole window's bottom_diff
            for (int h = hstart; h < hend; ++h) {
              for (int w = wstart; w < wend; ++w) {
                bottom_diff[h * width_ + w] +=
                    top_diff[ph * pooled_width_ + pw] / pool_size;
              }
            }
          }
        }
        // offset: repeat for every channel
        bottom_diff += bottom[0]->offset(0, 1);
        top_diff += top[0]->offset(0, 1);
      }
    }
    break;
  case PoolingParameter_PoolMethod_STOCHASTIC:
    NOT_IMPLEMENTED;
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
}
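Continuing the 4x4 example from the forward pass, a minimal sketch (not Caffe code) of the max-pooling backward branch: each top gradient is scattered to the single bottom element recorded in the mask, whereas the average branch instead spreads top_diff / pool_size over the whole window:

#include <cstdio>

int main() {
  const int H = 4, W = 4, PH = 2, PW = 2;
  const int mask[PH * PW] = { 4, 7, 12, 15 };            // from the forward example
  const float top_diff[PH * PW] = { 0.1f, 0.2f, 0.3f, 0.4f };
  float bottom_diff[H * W] = { 0 };                      // mirrors caffe_set(..., Dtype(0), ...)
  for (int i = 0; i < PH * PW; ++i) {
    bottom_diff[mask[i]] += top_diff[i];                 // scatter via the stored index
  }
  for (int h = 0; h < H; ++h) {
    for (int w = 0; w < W; ++w) {
      std::printf("%.1f ", bottom_diff[h * W + w]);      // nonzero only at the maxima
    }
    std::printf("\n");
  }
  return 0;
}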