Dissecting the Caffe Framework Source Code (4): The Convolution Layer Base Class BaseConvolutionLayer


    The target of the forward pass from the data layer DataLayer is the convolution layer ConvolutionLayer. The convolution layer convolves the input image with a set of weight filter kernels; concretely, it unrolls the image into column vectors and implements the convolution as a matrix multiplication.
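    To make that concrete, here is a minimal standalone sketch of the im2col-plus-GEMM idea (not Caffe's own im2col_cpu, whose full signature appears in the header below): a 3x3 box filter slides over a 4x4 single-channel image; each receptive field is unrolled into one column, so the convolution collapses into a single matrix product.

#include <cstdio>

int main() {
  const int H = 4, W = 4, K = 3;             // input and kernel sizes
  const int OH = H - K + 1, OW = W - K + 1;  // output: 2x2 (stride 1, no pad)
  float img[H * W], kernel[K * K], col[K * K * OH * OW], out[OH * OW];
  for (int i = 0; i < H * W; ++i) img[i] = static_cast<float>(i);
  for (int i = 0; i < K * K; ++i) kernel[i] = 1.0f / 9.0f;  // 3x3 box filter

  // im2col: col is (K*K) x (OH*OW); column j holds the j-th receptive field.
  for (int ky = 0; ky < K; ++ky)
    for (int kx = 0; kx < K; ++kx)
      for (int oy = 0; oy < OH; ++oy)
        for (int ox = 0; ox < OW; ++ox)
          col[(ky * K + kx) * OH * OW + oy * OW + ox] =
              img[(oy + ky) * W + (ox + kx)];

  // GEMM: out(1 x OH*OW) = kernel(1 x K*K) * col(K*K x OH*OW).
  for (int j = 0; j < OH * OW; ++j) {
    out[j] = 0.0f;
    for (int i = 0; i < K * K; ++i) out[j] += kernel[i] * col[i * OH * OW + j];
  }
  for (int j = 0; j < OH * OW; ++j) printf("%.2f ", out[j]);  // 5 6 9 10
  return 0;
}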

    As before, let's first look at the class diagram of the convolution layers.



    The BaseConvolutionLayer class is the base class of all convolution layers. It is responsible for initializing the filter kernels, converting images into column vectors, and wrapping the matrix-multiplication routines of the BLAS library. Yangqing Jia explained this implementation strategy and its trade-offs in "Convolution in Caffe: a memo": the advantage is speed, and the equally obvious drawback is a large memory overhead. Better schemes exist (for example Alex Krizhevsky's), but they were not adopted for lack of time and energy; after all, Caffe was a side project born of not wanting to write a PhD thesis.
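    To put a number on that memory overhead: for a layer shaped like AlexNet's conv1 (a 3x227x227 input, 11x11 kernels, stride 4, 55x55 output), the im2col buffer holds 3 * 11 * 11 * 55 * 55 = 1,098,075 values per image, roughly 4.2 MB in single precision, against about 0.6 MB for the input itself. That is a roughly sevenfold blow-up before the GEMM even runs.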

    The ConvolutionLayer class is the concrete convolution layer; if the system supports cuDNN, the concrete class is CuDNNConvolutionLayer instead. Note that cuDNN is only supported on GPUs with CUDA compute capability 3.0 or higher; the compute capability of each model can be looked up on NVIDIA's CUDA GPUs page. If the current GPU does not support cuDNN while Caffe is built with cuDNN enabled (the default), constructing a CuDNNConvolutionLayer instance at runtime will fail. In that case, set the <UseCuDNN> tag to false in CommonSettings.props, or remove the USE_CUDNN definition under the project's "C/C++" -> "Preprocessor" settings, and rebuild Caffe.
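    If you are unsure what your GPU reports, the following minimal standalone check (independent of Caffe, compiled with nvcc) queries the compute capability through the CUDA runtime API:

#include <cstdio>
#include <cuda_runtime.h>

int main() {
  cudaDeviceProp prop;
  // Query device 0; cudaGetDeviceProperties fills in name and capability.
  if (cudaGetDeviceProperties(&prop, 0) != cudaSuccess) {
    printf("No CUDA device found.\n");
    return 1;
  }
  printf("%s: compute capability %d.%d\n", prop.name, prop.major, prop.minor);
  if (prop.major < 3) {
    printf("cuDNN needs compute capability >= 3.0; build Caffe without USE_CUDNN.\n");
  }
  return 0;
}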


template <typename Dtype>
class BaseConvolutionLayer : public Layer<Dtype> {
 public:
  explicit BaseConvolutionLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline int MinBottomBlobs() const { return 1; }
  virtual inline int MinTopBlobs() const { return 1; }
  virtual inline bool EqualNumBottomTopBlobs() const { return true; }

 protected:
  // A series of BLAS helper functions
  void forward_cpu_gemm(const Dtype* input, const Dtype* weights,
      Dtype* output, bool skip_im2col = false);
  void forward_cpu_bias(Dtype* output, const Dtype* bias);
  void backward_cpu_gemm(const Dtype* input, const Dtype* weights,
      Dtype* output);
  void weight_cpu_gemm(const Dtype* input, const Dtype* output,
      Dtype* weights);
  void backward_cpu_bias(Dtype* bias, const Dtype* input);

#ifndef CPU_ONLY
  void forward_gpu_gemm(const Dtype* col_input, const Dtype* weights,
      Dtype* output, bool skip_im2col = false);
  void forward_gpu_bias(Dtype* output, const Dtype* bias);
  void backward_gpu_gemm(const Dtype* input, const Dtype* weights,
      Dtype* col_output);
  void weight_gpu_gemm(const Dtype* col_input, const Dtype* output,
      Dtype* weights);
  void backward_gpu_bias(Dtype* bias, const Dtype* input);
#endif

  /// @brief The spatial dimensions of the input.
  inline int input_shape(int i) {
    return (*bottom_shape_)[channel_axis_ + i];
  }
  // Pure virtual; identifies convolution (return false) vs. deconvolution (return true)
  virtual bool reverse_dimensions() = 0;
  // Pure virtual; computes the output shape after the convolution
  virtual void compute_output_shape() = 0;

  /// Kernel dimensions
  Blob<int> kernel_shape_;
  /// Kernel stride
  Blob<int> stride_;
  /// Input padding in pixels
  Blob<int> pad_;
  /// Kernel dilation in pixels
  Blob<int> dilation_;
  /// @brief The spatial dimensions of the convolution input.
  Blob<int> conv_input_shape_;
  /// @brief The spatial dimensions of the col_buffer.
  vector<int> col_buffer_shape_;
  /// @brief The spatial dimensions of the output.
  vector<int> output_shape_;
  const vector<int>* bottom_shape_;
  int num_spatial_axes_;
  int bottom_dim_;
  int top_dim_;
  int channel_axis_;
  int num_;
  int channels_;         // number of channels
  int group_;            // number of groups
  int out_spatial_dim_;
  int weight_offset_;    // weight offset; here 25 * 20 = 500
  int num_output_;       // number of outputs
  bool bias_term_;       // whether a bias term is included
  bool is_1x1_;          // whether this is the 1x1 special case
  bool force_nd_im2col_;

 private:
  // Convert an image into column vectors
  inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) {
    if (!force_nd_im2col_ && num_spatial_axes_ == 2) {
      im2col_cpu(data, conv_in_channels_,
          conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2],
          kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1],
          pad_.cpu_data()[0], pad_.cpu_data()[1],
          stride_.cpu_data()[0], stride_.cpu_data()[1],
          dilation_.cpu_data()[0], dilation_.cpu_data()[1], col_buff);
    } else {
      im2col_nd_cpu(data, num_spatial_axes_, conv_input_shape_.cpu_data(),
          col_buffer_shape_.data(), kernel_shape_.cpu_data(),
          pad_.cpu_data(), stride_.cpu_data(), dilation_.cpu_data(), col_buff);
    }
  }
  inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) {
    if (!force_nd_im2col_ && num_spatial_axes_ == 2) {
      col2im_cpu(col_buff, conv_in_channels_,
          conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2],
          kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1],
          pad_.cpu_data()[0], pad_.cpu_data()[1],
          stride_.cpu_data()[0], stride_.cpu_data()[1],
          dilation_.cpu_data()[0], dilation_.cpu_data()[1], data);
    } else {
      col2im_nd_cpu(col_buff, num_spatial_axes_, conv_input_shape_.cpu_data(),
          col_buffer_shape_.data(), kernel_shape_.cpu_data(),
          pad_.cpu_data(), stride_.cpu_data(), dilation_.cpu_data(), data);
    }
  }
#ifndef CPU_ONLY
  inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) {
    if (!force_nd_im2col_ && num_spatial_axes_ == 2) {
      im2col_gpu(data, conv_in_channels_,
          conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2],
          kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1],
          pad_.cpu_data()[0], pad_.cpu_data()[1],
          stride_.cpu_data()[0], stride_.cpu_data()[1],
          dilation_.cpu_data()[0], dilation_.cpu_data()[1], col_buff);
    } else {
      im2col_nd_gpu(data, num_spatial_axes_, num_kernels_im2col_,
          conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(),
          kernel_shape_.gpu_data(), pad_.gpu_data(),
          stride_.gpu_data(), dilation_.gpu_data(), col_buff);
    }
  }
  inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) {
    if (!force_nd_im2col_ && num_spatial_axes_ == 2) {
      col2im_gpu(col_buff, conv_in_channels_,
          conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2],
          kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1],
          pad_.cpu_data()[0], pad_.cpu_data()[1],
          stride_.cpu_data()[0], stride_.cpu_data()[1],
          dilation_.cpu_data()[0], dilation_.cpu_data()[1], data);
    } else {
      col2im_nd_gpu(col_buff, num_spatial_axes_, num_kernels_col2im_,
          conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(),
          kernel_shape_.gpu_data(), pad_.gpu_data(), stride_.gpu_data(),
          dilation_.gpu_data(), data);
    }
  }
#endif

  int num_kernels_im2col_;
  int num_kernels_col2im_;
  int conv_out_channels_;     // number of output channels of the convolution
  int conv_in_channels_;      // number of input channels of the convolution
  int conv_out_spatial_dim_;
  int kernel_dim_;            // kernel size (e.g., 25 for a 5x5 kernel)
  int col_offset_;
  int output_offset_;

  Blob<Dtype> col_buffer_;
  Blob<Dtype> bias_multiplier_;
};
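The concrete subclasses supply compute_output_shape(); for ConvolutionLayer the per-axis computation follows the standard formula sketched below (a simplified sketch, not a verbatim copy of the Caffe source):

// Per-axis output size used by ConvolutionLayer's compute_output_shape():
// a dilated kernel covers dilation*(kernel-1)+1 input pixels, so the
// number of valid placements along one axis is:
int conv_output_dim(int input_dim, int kernel, int pad, int stride,
                    int dilation) {
  const int kernel_extent = dilation * (kernel - 1) + 1;
  return (input_dim + 2 * pad - kernel_extent) / stride + 1;
}
// e.g., conv_output_dim(227, 11, 0, 4, 1) == 55, matching AlexNet's conv1.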

The base_conv_layer.cpp file is fairly long; the annotated source follows.

template <typename Dtype>
void BaseConvolutionLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Read the kernel size and other parameters
  ConvolutionParameter conv_param = this->layer_param_.convolution_param();
  force_nd_im2col_ = conv_param.force_nd_im2col();
  channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis());
  const int first_spatial_axis = channel_axis_ + 1;
  const int num_axes = bottom[0]->num_axes();
  num_spatial_axes_ = num_axes - first_spatial_axis;
  CHECK_GE(num_spatial_axes_, 0);
  vector<int> bottom_dim_blob_shape(1, num_spatial_axes_ + 1);
  vector<int> spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1));
  // Set up the kernel dimensions
  kernel_shape_.Reshape(spatial_dim_blob_shape);
  int* kernel_shape_data = kernel_shape_.mutable_cpu_data();
  if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "kernel_h & kernel_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.kernel_size_size())
        << "Either kernel_size or kernel_h/w should be specified; not both.";
    kernel_shape_data[0] = conv_param.kernel_h();
    kernel_shape_data[1] = conv_param.kernel_w();
  } else {
    const int num_kernel_dims = conv_param.kernel_size_size();
    CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_)
        << "kernel_size must be specified once, or once per spatial dimension "
        << "(kernel_size specified " << num_kernel_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    for (int i = 0; i < num_spatial_axes_; ++i) {
      kernel_shape_data[i] =
          conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i);
    }
  }
  for (int i = 0; i < num_spatial_axes_; ++i) {
    CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero.";
  }
  // Set up the kernel stride
  stride_.Reshape(spatial_dim_blob_shape);
  int* stride_data = stride_.mutable_cpu_data();
  if (conv_param.has_stride_h() || conv_param.has_stride_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "stride_h & stride_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.stride_size())
        << "Either stride or stride_h/w should be specified; not both.";
    stride_data[0] = conv_param.stride_h();
    stride_data[1] = conv_param.stride_w();
  } else {
    const int num_stride_dims = conv_param.stride_size();
    CHECK(num_stride_dims == 0 || num_stride_dims == 1 ||
          num_stride_dims == num_spatial_axes_)
        << "stride must be specified once, or once per spatial dimension "
        << "(stride specified " << num_stride_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    const int kDefaultStride = 1;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      stride_data[i] = (num_stride_dims == 0) ? kDefaultStride :
          conv_param.stride((num_stride_dims == 1) ? 0 : i);
      CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero.";
    }
  }
  // Set up the input padding
  pad_.Reshape(spatial_dim_blob_shape);
  int* pad_data = pad_.mutable_cpu_data();
  if (conv_param.has_pad_h() || conv_param.has_pad_w()) {
    CHECK_EQ(num_spatial_axes_, 2)
        << "pad_h & pad_w can only be used for 2D convolution.";
    CHECK_EQ(0, conv_param.pad_size())
        << "Either pad or pad_h/w should be specified; not both.";
    pad_data[0] = conv_param.pad_h();
    pad_data[1] = conv_param.pad_w();
  } else {
    const int num_pad_dims = conv_param.pad_size();
    CHECK(num_pad_dims == 0 || num_pad_dims == 1 ||
          num_pad_dims == num_spatial_axes_)
        << "pad must be specified once, or once per spatial dimension "
        << "(pad specified " << num_pad_dims << " times; "
        << num_spatial_axes_ << " spatial dims).";
    const int kDefaultPad = 0;
    for (int i = 0; i < num_spatial_axes_; ++i) {
      pad_data[i] = (num_pad_dims == 0) ? kDefaultPad :
          conv_param.pad((num_pad_dims == 1) ? 0 : i);
    }
  }
  // Set up the kernel dilation
  dilation_.Reshape(spatial_dim_blob_shape);
  int* dilation_data = dilation_.mutable_cpu_data();
  const int num_dilation_dims = conv_param.dilation_size();
  CHECK(num_dilation_dims == 0 || num_dilation_dims == 1 ||
        num_dilation_dims == num_spatial_axes_)
      << "dilation must be specified once, or once per spatial dimension "
      << "(dilation specified " << num_dilation_dims << " times; "
      << num_spatial_axes_ << " spatial dims).";
  const int kDefaultDilation = 1;
  for (int i = 0; i < num_spatial_axes_; ++i) {
    dilation_data[i] = (num_dilation_dims == 0) ? kDefaultDilation :
                       conv_param.dilation((num_dilation_dims == 1) ? 0 : i);
  }
  // is_1x1_ is set to true only for 1x1 kernels with stride 1 and no padding
  is_1x1_ = true;
  for (int i = 0; i < num_spatial_axes_; ++i) {
    is_1x1_ &=
        kernel_shape_data[i] == 1 && stride_data[i] == 1 && pad_data[i] == 0;
    if (!is_1x1_) { break; }
  }
  // Configure the channel counts
  channels_ = bottom[0]->shape(channel_axis_);
  num_output_ = this->layer_param_.convolution_param().num_output();
  CHECK_GT(num_output_, 0);
  group_ = this->layer_param_.convolution_param().group();
  CHECK_EQ(channels_ % group_, 0);
  CHECK_EQ(num_output_ % group_, 0)
      << "Number of output should be multiples of group.";
  if (reverse_dimensions()) {
    conv_out_channels_ = channels_;
    conv_in_channels_ = num_output_;
  } else {
    conv_out_channels_ = num_output_;  // output channels of the convolution
    conv_in_channels_ = channels_;     // input channels of the convolution
  }
  // Handle the weight and bias parameters
  // - blobs_[0] holds the filter weights
  // - blobs_[1] holds the biases (optional)
  vector<int> weight_shape(2);
  weight_shape[0] = conv_out_channels_;
  weight_shape[1] = conv_in_channels_ / group_;
  for (int i = 0; i < num_spatial_axes_; ++i) {
    weight_shape.push_back(kernel_shape_data[i]);
  }
  // Read from the configuration whether a bias term is included
  bias_term_ = this->layer_param_.convolution_param().bias_term();
  vector<int> bias_shape(bias_term_, num_output_);
  if (this->blobs_.size() > 0) {
    CHECK_EQ(1 + bias_term_, this->blobs_.size())
        << "Incorrect number of weight blobs.";
    if (weight_shape != this->blobs_[0]->shape()) {
      Blob<Dtype> weight_shaped_blob(weight_shape);
      LOG(FATAL) << "Incorrect weight shape: expected shape "
          << weight_shaped_blob.shape_string() << "; instead, shape was "
          << this->blobs_[0]->shape_string();
    }
    if (bias_term_ && bias_shape != this->blobs_[1]->shape()) {
      Blob<Dtype> bias_shaped_blob(bias_shape);
      LOG(FATAL) << "Incorrect bias shape: expected shape "
          << bias_shaped_blob.shape_string() << "; instead, shape was "
          << this->blobs_[1]->shape_string();
    }
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (bias_term_) {
      this->blobs_.resize(2);  // with a bias term, blobs_ has two entries
    } else {
      this->blobs_.resize(1);  // without one, blobs_ holds only the weights
    }
    // Initialize and fill the weights:
    // output channels x input channels per-group x kernel height x kernel width
    this->blobs_[0].reset(new Blob<Dtype>(weight_shape));
    shared_ptr<Filler<Dtype> > weight_filler(GetFiller<Dtype>(
        this->layer_param_.convolution_param().weight_filler()));
    // Fill the filter kernels with random numbers (e.g., the Xavier filler)
    weight_filler->Fill(this->blobs_[0].get());
    // If bias is enabled, initialize the bias blob as well
    if (bias_term_) {
      this->blobs_[1].reset(new Blob<Dtype>(bias_shape));
      shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
          this->layer_param_.convolution_param().bias_filler()));
      // Fill the bias (typically with zeros)
      bias_filler->Fill(this->blobs_[1].get());
    }
  }
  kernel_dim_ = this->blobs_[0]->count(1);
  // Compute the weight offset
  weight_offset_ = conv_out_channels_ * kernel_dim_ / group_;
  // Propagate gradients to the parameters (as directed by backward pass).
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}

// Forward-pass matrix multiplication on the CPU
template <typename Dtype>
void BaseConvolutionLayer<Dtype>::forward_cpu_gemm(const Dtype* input,
    const Dtype* weights, Dtype* output, bool skip_im2col) {
  // First convert the image into column vectors
  const Dtype* col_buff = input;
  if (!is_1x1_) {
    if (!skip_im2col) {
      conv_im2col_cpu(input, col_buffer_.mutable_cpu_data());
    }
    col_buff = col_buffer_.cpu_data();
  }
  // Implement the convolution as a matrix multiplication
  for (int g = 0; g < group_; ++g) {
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, conv_out_channels_ /
        group_, conv_out_spatial_dim_, kernel_dim_,
        (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g,
        (Dtype)0., output + output_offset_ * g);
  }
}

// CPU forward pass for the bias (adds the bias to the convolution result)
template <typename Dtype>
void BaseConvolutionLayer<Dtype>::forward_cpu_bias(Dtype* output,
    const Dtype* bias) {
  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_,
      out_spatial_dim_, 1, (Dtype)1., bias, bias_multiplier_.cpu_data(),
      (Dtype)1., output);
}

// CPU backward pass (gradient with respect to the data)
template <typename Dtype>
void BaseConvolutionLayer<Dtype>::backward_cpu_gemm(const Dtype* output,
    const Dtype* weights, Dtype* input) {
  Dtype* col_buff = col_buffer_.mutable_cpu_data();
  if (is_1x1_) {
    col_buff = input;
  }
  for (int g = 0; g < group_; ++g) {
    caffe_cpu_gemm<Dtype>(CblasTrans, CblasNoTrans, kernel_dim_,
        conv_out_spatial_dim_, conv_out_channels_ / group_,
        (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g,
        (Dtype)0., col_buff + col_offset_ * g);
  }
  if (!is_1x1_) {
    conv_col2im_cpu(col_buff, input);
  }
}

// CPU gradient with respect to the weights
template <typename Dtype>
void BaseConvolutionLayer<Dtype>::weight_cpu_gemm(const Dtype* input,
    const Dtype* output, Dtype* weights) {
  const Dtype* col_buff = input;
  if (!is_1x1_) {
    conv_im2col_cpu(input, col_buffer_.mutable_cpu_data());
    col_buff = col_buffer_.cpu_data();
  }
  for (int g = 0; g < group_; ++g) {
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasTrans, conv_out_channels_ / group_,
        kernel_dim_, conv_out_spatial_dim_,
        (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g,
        (Dtype)1., weights + weight_offset_ * g);
  }
}

// CPU backward pass for the bias term
template <typename Dtype>
void BaseConvolutionLayer<Dtype>::backward_cpu_bias(Dtype* bias,
    const Dtype* input) {
  caffe_cpu_gemv<Dtype>(CblasNoTrans, num_output_, out_spatial_dim_, 1.,
      input, bias_multiplier_.cpu_data(), 1., bias);
}

#ifndef CPU_ONLY
// GPU forward pass
template <typename Dtype>
void BaseConvolutionLayer<Dtype>::forward_gpu_gemm(const Dtype* input,
    const Dtype* weights, Dtype* output, bool skip_im2col) {
  const Dtype* col_buff = input;
  if (!is_1x1_) {
    if (!skip_im2col) {
      conv_im2col_gpu(input, col_buffer_.mutable_gpu_data());
    }
    col_buff = col_buffer_.gpu_data();
  }
  for (int g = 0; g < group_; ++g) {
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, conv_out_channels_ /
        group_, conv_out_spatial_dim_, kernel_dim_,
        (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g,
        (Dtype)0., output + output_offset_ * g);
  }
}

// GPU forward pass for the bias term
template <typename Dtype>
void BaseConvolutionLayer<Dtype>::forward_gpu_bias(Dtype* output,
    const Dtype* bias) {
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_,
      out_spatial_dim_, 1, (Dtype)1., bias, bias_multiplier_.gpu_data(),
      (Dtype)1., output);
}

// GPU gradient with respect to the data
template <typename Dtype>
void BaseConvolutionLayer<Dtype>::backward_gpu_gemm(const Dtype* output,
    const Dtype* weights, Dtype* input) {
  Dtype* col_buff = col_buffer_.mutable_gpu_data();
  if (is_1x1_) {
    col_buff = input;
  }
  for (int g = 0; g < group_; ++g) {
    caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, kernel_dim_,
        conv_out_spatial_dim_, conv_out_channels_ / group_,
        (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g,
        (Dtype)0., col_buff + col_offset_ * g);
  }
  if (!is_1x1_) {
    conv_col2im_gpu(col_buff, input);
  }
}

// GPU gradient with respect to the weights
template <typename Dtype>
void BaseConvolutionLayer<Dtype>::weight_gpu_gemm(const Dtype* input,
    const Dtype* output, Dtype* weights) {
  const Dtype* col_buff = input;
  if (!is_1x1_) {
    conv_im2col_gpu(input, col_buffer_.mutable_gpu_data());
    col_buff = col_buffer_.gpu_data();
  }
  for (int g = 0; g < group_; ++g) {
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, conv_out_channels_ / group_,
        kernel_dim_, conv_out_spatial_dim_,
        (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g,
        (Dtype)1., weights + weight_offset_ * g);
  }
}

// GPU backward pass for the bias term
template <typename Dtype>
void BaseConvolutionLayer<Dtype>::backward_gpu_bias(Dtype* bias,
    const Dtype* input) {
  caffe_gpu_gemv<Dtype>(CblasNoTrans, num_output_, out_spatial_dim_, 1.,
      input, bias_multiplier_.gpu_data(), 1., bias);
}
#endif  // !CPU_ONLY
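One detail worth highlighting: forward_cpu_bias adds the per-channel bias through a rank-1 GEMM against bias_multiplier_, a vector of ones of length out_spatial_dim_, so that output += bias * ones(1, out_spatial_dim) broadcasts the bias over every spatial position. A minimal sketch of the same effect with plain loops (not Caffe's BLAS call):

// bias has num_output entries; output is num_output x out_spatial_dim,
// stored row-major. Each channel's bias is added at every spatial position,
// which is exactly what the GEMM with beta = 1 computes.
void add_bias(float* output, const float* bias,
              int num_output, int out_spatial_dim) {
  for (int c = 0; c < num_output; ++c)
    for (int s = 0; s < out_spatial_dim; ++s)
      output[c * out_spatial_dim + s] += bias[c];
}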

