caffe源码 之 Blob类
This post walks through the Blob implementation in the Caffe sources (blob.hpp and src/caffe/blob.cpp), which handles data storage and transfer within the framework.
In Caffe, the Blob class represents all of the data flowing through a network: the training data, each layer's own parameters (weights, biases, and their gradients), and the data passed between layers. A Blob can store its contents on either the CPU or the GPU and synchronize between the two.
Below are the annotations I collected while reading the source, together with my understanding of it.
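Before diving into the headers, here is a minimal sketch of typical Blob usage. It is my own illustration rather than code from the Caffe sources, and it assumes a standard Caffe build and touches only the CPU side:

#include <iostream>
#include <vector>
#include "caffe/blob.hpp"

int main() {
  // A 4D blob with num=1, channels=2, height=3, width=4 (legacy constructor).
  caffe::Blob<float> blob(1, 2, 3, 4);
  std::cout << blob.shape_string() << std::endl;  // prints "1 2 3 4 (24)"

  // Write into the data on the CPU; SyncedMemory tracks where the freshest
  // copy lives and only syncs to the GPU when a gpu_* accessor is called.
  float* data = blob.mutable_cpu_data();
  for (int i = 0; i < blob.count(); ++i) {
    data[i] = 1.0f;
  }

  // Read a single element back via its (n, c, h, w) coordinates.
  std::cout << blob.data_at(0, 1, 2, 3) << std::endl;  // 1

  // Reshape only reallocates when the new count exceeds capacity_;
  // here the element count stays 24, so no reallocation happens.
  std::vector<int> new_shape(2);
  new_shape[0] = 4;
  new_shape[1] = 6;
  blob.Reshape(new_shape);
  return 0;
}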
Blob.hpp::::::::::::::::
#ifndef CAFFE_BLOB_HPP_
#define CAFFE_BLOB_HPP_

#include <algorithm>
#include <string>
#include <vector>

#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/syncedmem.hpp"

const int kMaxBlobAxes = 32;

namespace caffe {

/**
 * @brief A wrapper around SyncedMemory holders serving as the basic
 *        computational unit through which Layer%s, Net%s, and Solver%s
 *        interact.
 *
 * TODO(dox): more thorough description.
 */
template <typename Dtype>
class Blob {
 public:
  // Default constructor.
  Blob()
      : data_(), diff_(), count_(0), capacity_(0) {}

  /// @brief Deprecated; use <code>Blob(const vector<int>& shape)</code>.
  explicit Blob(const int num, const int channels, const int height,
      const int width);
  explicit Blob(const vector<int>& shape);

  /// @brief Deprecated; use <code>Reshape(const vector<int>& shape)</code>.
  void Reshape(const int num, const int channels, const int height,
      const int width);
  /**
   * @brief Change the dimensions of the blob, allocating new memory if
   *        necessary.
   *
   * This function can be called both to create an initial allocation
   * of memory, and to adjust the dimensions of a top blob during Layer::Reshape
   * or Layer::Forward. When changing the size of blob, memory will only be
   * reallocated if sufficient memory does not already exist, and excess memory
   * will never be freed.
   *
   * Note that reshaping an input blob and immediately calling Net::Backward is
   * an error; either Net::Forward or Net::Reshape need to be called to
   * propagate the new input shape to higher layers.
   */
  void Reshape(const vector<int>& shape);
  void Reshape(const BlobShape& shape);
  void ReshapeLike(const Blob& other);

  // Print each dimension separated by spaces, followed by the total
  // element count in parentheses.
  inline string shape_string() const {
    ostringstream stream;
    for (int i = 0; i < shape_.size(); ++i) {
      stream << shape_[i] << " ";
    }
    stream << "(" << count_ << ")";
    return stream.str();
  }
  // Return the blob's shape.
  inline const vector<int>& shape() const { return shape_; }
  /**
   * @brief Returns the dimension of the index-th axis (or the negative index-th
   *        axis from the end, if index is negative).
   *
   * @param index the axis index, which may be negative as it will be
   *        "canonicalized" using CanonicalAxisIndex.
   *        Dies on out of range index.
   */
  // Return the size of the index-th axis.
  inline int shape(int index) const {
    return shape_[CanonicalAxisIndex(index)];
  }
  // Return the number of axes.
  inline int num_axes() const { return shape_.size(); }
  // Return the product of all dimensions, i.e. the total number of elements.
  inline int count() const { return count_; }

  /**
   * @brief Compute the volume of a slice; i.e., the product of dimensions
   *        among a range of axes.
   *
   * @param start_axis The first axis to include in the slice.
   *
   * @param end_axis The first axis to exclude from the slice.
   */
  // Number of elements spanned by a range of axes.
  inline int count(int start_axis, int end_axis) const {
    CHECK_LE(start_axis, end_axis);
    CHECK_GE(start_axis, 0);
    CHECK_GE(end_axis, 0);
    CHECK_LE(start_axis, num_axes());
    CHECK_LE(end_axis, num_axes());
    int count = 1;
    for (int i = start_axis; i < end_axis; ++i) {
      count *= shape(i);
    }
    return count;
  }
  /**
   * @brief Compute the volume of a slice spanning from a particular first
   *        axis to the final axis.
   *
   * @param start_axis The first axis to include in the slice.
   */
  // Number of elements from the given axis through the last axis.
  inline int count(int start_axis) const {
    return count(start_axis, num_axes());
  }

  /**
   * @brief Returns the 'canonical' version of a (usually) user-specified axis,
   *        allowing for negative indexing (e.g., -1 for the last axis).
   *
   * @param axis_index the axis index.
   *        If 0 <= index < num_axes(), return index.
   *        If -num_axes <= index <= -1, return (num_axes() - (-index)),
   *        e.g., the last axis index (num_axes() - 1) if index == -1,
   *        the second to last if index == -2, etc.
   *        Dies on out of range index.
   */
  // Supports negative axis indices, which count from the end. Normalizes an
  // index in [-num_axes, num_axes) to the canonical range [0, num_axes).
  inline int CanonicalAxisIndex(int axis_index) const {
    // Check that the index lies in [-num_axes, num_axes).
    CHECK_GE(axis_index, -num_axes())
        << "axis " << axis_index << " out of range for " << num_axes()
        << "-D Blob with shape " << shape_string();
    CHECK_LT(axis_index, num_axes())
        << "axis " << axis_index << " out of range for " << num_axes()
        << "-D Blob with shape " << shape_string();
    if (axis_index < 0) {
      return axis_index + num_axes();
    }
    return axis_index;
  }

  /// @brief Deprecated legacy shape accessor num: use shape(0) instead.
  inline int num() const { return LegacyShape(0); }
  /// @brief Deprecated legacy shape accessor channels: use shape(1) instead.
  inline int channels() const { return LegacyShape(1); }
  /// @brief Deprecated legacy shape accessor height: use shape(2) instead.
  inline int height() const { return LegacyShape(2); }
  /// @brief Deprecated legacy shape accessor width: use shape(3) instead.
  inline int width() const { return LegacyShape(3); }
  // Checks that the blob has at most 4 axes; the legacy num, channels, height,
  // width axes correspond to shape(0)..shape(3). Returns the size of the given
  // axis, like shape(), with missing trailing axes treated as size 1.
  inline int LegacyShape(int index) const {
    CHECK_LE(num_axes(), 4)
        << "Cannot use legacy accessors on Blobs with > 4 axes.";
    CHECK_LT(index, 4);   // the axis index must be < 4
    CHECK_GE(index, -4);  // the axis index must be >= -4
    if (index >= num_axes() || index < -num_axes()) {
      // Axis is out of range, but still in [0, 3] (or [-4, -1] for reverse
      // indexing) -- this special case simulates the one-padding used to fill
      // extraneous axes of legacy blobs.
      return 1;
    }
    return shape(index);
  }

  // Compute the linear (one-dimensional) offset of an element.
  inline int offset(const int n, const int c = 0, const int h = 0,
      const int w = 0) const {
    CHECK_GE(n, 0);  // check that the arguments are within bounds
    CHECK_LE(n, num());
    CHECK_GE(channels(), 0);
    CHECK_LE(c, channels());
    CHECK_GE(height(), 0);
    CHECK_LE(h, height());
    CHECK_GE(width(), 0);
    CHECK_LE(w, width());
    return ((n * channels() + c) * height() + h) * width() + w;
  }
  // The same linear offset computation, taking the indices as a vector<int>.
  inline int offset(const vector<int>& indices) const {
    CHECK_LE(indices.size(), num_axes());
    int offset = 0;
    for (int i = 0; i < num_axes(); ++i) {
      offset *= shape(i);
      if (indices.size() > i) {
        CHECK_GE(indices[i], 0);
        CHECK_LT(indices[i], shape(i));
        offset += indices[i];
      }
    }
    return offset;
  }
  /**
   * @brief Copy from a source Blob.
   *
   * @param source the Blob to copy from
   * @param copy_diff if false, copy the data; if true, copy the diff
   * @param reshape if false, require this Blob to be pre-shaped to the shape
   *        of other (and die otherwise); if true, Reshape this Blob to other's
   *        shape if necessary
   */
  // Copy from the given blob; if copy_diff is true the diff is copied instead
  // of the data, and if reshape is true this blob is reshaped to match source.
  void CopyFrom(const Blob<Dtype>& source, bool copy_diff = false,
      bool reshape = false);

  // Read an element of the (forward-pass) data in CPU memory.
  inline Dtype data_at(const int n, const int c, const int h,
      const int w) const {
    return cpu_data()[offset(n, c, h, w)];
  }
  // Read an element of the (backward-pass) diff in CPU memory.
  inline Dtype diff_at(const int n, const int c, const int h,
      const int w) const {
    return cpu_diff()[offset(n, c, h, w)];
  }
  // Read the data element at the position given by the index vector.
  inline Dtype data_at(const vector<int>& index) const {
    return cpu_data()[offset(index)];
  }
  // Read the diff element at the position given by the index vector.
  inline Dtype diff_at(const vector<int>& index) const {
    return cpu_diff()[offset(index)];
  }

  // Return the SyncedMemory holding the data (for input blobs, this is
  // typically the image data itself).
  inline const shared_ptr<SyncedMemory>& data() const {
    CHECK(data_);
    return data_;
  }
  // Return the SyncedMemory holding the diff (typically the gradients).
  inline const shared_ptr<SyncedMemory>& diff() const {
    CHECK(diff_);
    return diff_;
  }

  // Accessors for the underlying memory; see the .cpp for details.
  const Dtype* cpu_data() const;
  void set_cpu_data(Dtype* data);
  const int* gpu_shape() const;
  const Dtype* gpu_data() const;
  const Dtype* cpu_diff() const;
  const Dtype* gpu_diff() const;
  // Mutable accessors; the CPU/GPU synchronization logic lives in
  // SyncedMemory (see syncedmem.cpp).
  Dtype* mutable_cpu_data();
  Dtype* mutable_gpu_data();
  Dtype* mutable_cpu_diff();
  Dtype* mutable_gpu_diff();
  // Parameter update: subtracts the diff part from the data part.
  void Update();
  // Deserialize the blob from a protobuf message.
  void FromProto(const BlobProto& proto, bool reshape = true);
  // Serialize the blob to a protobuf message.
  void ToProto(BlobProto* proto, bool write_diff = false) const;

  /// @brief Compute the sum of absolute values (L1 norm) of the data.
  Dtype asum_data() const;
  /// @brief Compute the sum of absolute values (L1 norm) of the diff.
  Dtype asum_diff() const;
  /// @brief Compute the sum of squares (L2 norm squared) of the data.
  Dtype sumsq_data() const;
  /// @brief Compute the sum of squares (L2 norm squared) of the diff.
  Dtype sumsq_diff() const;

  /// @brief Scale the blob data by a constant factor.
  void scale_data(Dtype scale_factor);
  /// @brief Scale the blob diff by a constant factor.
  void scale_diff(Dtype scale_factor);

  /**
   * @brief Set the data_ shared_ptr to point to the SyncedMemory holding the
   *        data_ of Blob other -- useful in Layer%s which simply perform a copy
   *        in their Forward pass.
   *
   * This deallocates the SyncedMemory holding this Blob's data_, as
   * shared_ptr calls its destructor when reset with the "=" operator.
   */
  // Share other's data: point this blob's data pointer at other's data.
  void ShareData(const Blob& other);
  /**
   * @brief Set the diff_ shared_ptr to point to the SyncedMemory holding the
   *        diff_ of Blob other -- useful in Layer%s which simply perform a copy
   *        in their Forward pass.
   *
   * This deallocates the SyncedMemory holding this Blob's diff_, as
   * shared_ptr calls its destructor when reset with the "=" operator.
   */
  // Share other's diff: point this blob's diff pointer at other's diff.
  void ShareDiff(const Blob& other);

  // Check whether this blob's shape matches the shape stored in other.
  bool ShapeEquals(const BlobProto& other);

 protected:
  // shared_ptr is the Boost smart pointer.
  shared_ptr<SyncedMemory> data_;        // forward-pass data
  shared_ptr<SyncedMemory> diff_;        // backward-pass data, i.e. gradients
  shared_ptr<SyncedMemory> shape_data_;  // legacy storage of the blob shape
  vector<int> shape_;                    // current storage of the blob shape
  // Number of valid elements: num * channels * height * width.
  int count_;
  // Number of elements the allocated memory can hold.
  int capacity_;

  DISABLE_COPY_AND_ASSIGN(Blob);
};  // class Blob

}  // namespace caffe

#endif  // CAFFE_BLOB_HPP_
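To make the offset() arithmetic concrete: for a blob of shape (N, C, H, W), element (n, c, h, w) lives at row-major index ((n * C + c) * H + h) * W + w, so w varies fastest and n slowest. A small standalone sanity check of my own, using example numbers:

#include <cassert>

// Row-major offset for a 4D blob, mirroring Blob::offset(n, c, h, w).
inline int blob_offset(int C, int H, int W, int n, int c, int h, int w) {
  return ((n * C + c) * H + h) * W + w;
}

int main() {
  // Shape (2, 3, 4, 5): the strides are C*H*W=60, H*W=20, W=5, and 1.
  assert(blob_offset(3, 4, 5, 0, 0, 0, 1) == 1);   // w varies fastest
  assert(blob_offset(3, 4, 5, 0, 0, 1, 0) == 5);   // one row = W elements
  assert(blob_offset(3, 4, 5, 0, 1, 0, 0) == 20);  // one channel = H*W
  assert(blob_offset(3, 4, 5, 1, 0, 0, 0) == 60);  // one image = C*H*W
  return 0;
}

The same shape can also be indexed through CanonicalAxisIndex: shape(-1) is the width and shape(-4) the num, which is what LegacyShape relies on for old 4D blobs.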
Blob.cpp::::::::::::::::
#include <climits>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

// The legacy 4D Reshape; delegates to the vector<int> overload below.
template <typename Dtype>
void Blob<Dtype>::Reshape(const int num, const int channels, const int height,
    const int width) {
  vector<int> shape(4);
  shape[0] = num;
  shape[1] = channels;
  shape[2] = height;
  shape[3] = width;
  Reshape(shape);
}

// The main Reshape implementation.
template <typename Dtype>
void Blob<Dtype>::Reshape(const vector<int>& shape) {
  CHECK_LE(shape.size(), kMaxBlobAxes);  // at most kMaxBlobAxes (32) axes
  count_ = 1;
  shape_.resize(shape.size());  // resize the new shape vector shape_
  if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) {
    shape_data_.reset(new SyncedMemory(shape.size() * sizeof(int)));
  }
  int* shape_data = static_cast<int*>(shape_data_->mutable_cpu_data());
  for (int i = 0; i < shape.size(); ++i) {
    // Validate each dimension.
    CHECK_GE(shape[i], 0);
    if (count_ != 0) {
      CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX";
    }
    // Accumulate the element count.
    count_ *= shape[i];
    // Copy the shape into both the new and the legacy shape storage.
    shape_[i] = shape[i];
    shape_data[i] = shape[i];
  }
  // Only reallocate when the new count exceeds the current capacity;
  // otherwise a reshape merely copies the shape data.
  if (count_ > capacity_) {
    capacity_ = count_;
    data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
    diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
  }
}

// Reshape from a BlobShape protobuf message: copy the dimensions and
// delegate to the vector<int> overload.
template <typename Dtype>
void Blob<Dtype>::Reshape(const BlobShape& shape) {
  CHECK_LE(shape.dim_size(), kMaxBlobAxes);  // at most 32 axes
  vector<int> shape_vec(shape.dim_size());
  for (int i = 0; i < shape.dim_size(); ++i) {
    shape_vec[i] = shape.dim(i);
  }
  Reshape(shape_vec);
}

// Reshape this blob to match another blob's shape.
template <typename Dtype>
void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {
  Reshape(other.shape());
}

// Legacy 4D constructor.
template <typename Dtype>
Blob<Dtype>::Blob(const int num, const int channels, const int height,
    const int width)
  // capacity_ must be initialized before calling Reshape
  : capacity_(0) {
  Reshape(num, channels, height, width);  // Reshape allocates the memory
}

// Shape-vector constructor.
template <typename Dtype>
Blob<Dtype>::Blob(const vector<int>& shape)
  // capacity_ must be initialized before calling Reshape
  : capacity_(0) {
  Reshape(shape);
}

// Return the GPU address of the blob's shape data.
template <typename Dtype>
const int* Blob<Dtype>::gpu_shape() const {
  CHECK(shape_data_);
  return (const int*)shape_data_->gpu_data();
}

// Return the CPU address of the blob's data.
template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_data() const {
  CHECK(data_);
  return (const Dtype*)data_->cpu_data();
}

// Point the CPU data at an external buffer via SyncedMemory::set_cpu_data,
// releasing any previously owned CPU buffer.
template <typename Dtype>
void Blob<Dtype>::set_cpu_data(Dtype* data) {
  CHECK(data);
  data_->set_cpu_data(data);
}

// Return the GPU address of the blob's data.
template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_data() const {
  CHECK(data_);
  return (const Dtype*)data_->gpu_data();
}

// Return the CPU address of the blob's diff (the derivatives).
template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_diff() const {
  CHECK(diff_);
  return (const Dtype*)diff_->cpu_data();
}

// Return the GPU address of the blob's diff.
template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_diff() const {
  CHECK(diff_);
  return (const Dtype*)diff_->gpu_data();
}

// Delegates to SyncedMemory::mutable_cpu_data().
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_cpu_data() {
  CHECK(data_);
  return static_cast<Dtype*>(data_->mutable_cpu_data());
}

// Delegates to SyncedMemory::mutable_gpu_data().
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_gpu_data() {
  CHECK(data_);
  return static_cast<Dtype*>(data_->mutable_gpu_data());
}

// Delegates to SyncedMemory::mutable_cpu_data() for the diff.
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_cpu_diff() {
  CHECK(diff_);
  return static_cast<Dtype*>(diff_->mutable_cpu_data());
}

// Delegates to SyncedMemory::mutable_gpu_data() for the diff.
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_gpu_diff() {
  CHECK(diff_);
  return static_cast<Dtype*>(diff_->mutable_gpu_data());
}

// Point this blob's data at another blob's data to share it.
template <typename Dtype>
void Blob<Dtype>::ShareData(const Blob& other) {
  CHECK_EQ(count_, other.count());
  data_ = other.data();
}

// Point this blob's diff at another blob's diff to share it.
template <typename Dtype>
void Blob<Dtype>::ShareDiff(const Blob& other) {
  CHECK_EQ(count_, other.count());
  diff_ = other.diff();
}

// The "update" method is used for parameter blobs in a Net, which are stored
// as Blob<float> or Blob<double> -- hence we do not define it for
// Blob<int> or Blob<unsigned int>.
template <> void Blob<unsigned int>::Update() { NOT_IMPLEMENTED; }
template <> void Blob<int>::Update() { NOT_IMPLEMENTED; }

// Update computes data = -1 * diff + data, i.e. it folds the diff into data.
template <typename Dtype>
void Blob<Dtype>::Update() {
  // We will perform update based on where the data is located.
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    // Perform computation on the CPU.
    // axpy means "alpha * X plus Y" (the BLAS naming really is descriptive):
    // caffe_axpy computes Y = alpha * X + Y, here with alpha = -1.
    // mutable_cpu_data() is used for the output so the SyncedMemory head
    // state stays consistent.
    caffe_axpy<Dtype>(count_, Dtype(-1),
        static_cast<const Dtype*>(diff_->cpu_data()),
        static_cast<Dtype*>(data_->mutable_cpu_data()));
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    // Perform computation on the GPU.
    caffe_gpu_axpy<Dtype>(count_, Dtype(-1),
        static_cast<const Dtype*>(diff_->gpu_data()),
        static_cast<Dtype*>(data_->mutable_gpu_data()));
#else
    NO_GPU;
#endif
    break;
  default:
    LOG(FATAL) << "Syncedmem not initialized.";
  }
}

template <> unsigned int Blob<unsigned int>::asum_data() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <> int Blob<int>::asum_data() const {
  NOT_IMPLEMENTED;
  return 0;
}

// L1 norm of the data: the sum of absolute values of cpu_data or gpu_data,
// via caffe_cpu_asum() / caffe_gpu_asum() from math_functions.hpp.
template <typename Dtype>
Dtype Blob<Dtype>::asum_data() const {
  if (!data_) { return 0; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    return caffe_cpu_asum(count_, cpu_data());
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
  {
    Dtype asum;
    caffe_gpu_asum(count_, gpu_data(), &asum);
    return asum;
  }
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return 0;
}

template <> unsigned int Blob<unsigned int>::asum_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <> int Blob<int>::asum_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}

// L1 norm of the diff: the sum of absolute values of cpu_diff or gpu_diff,
// via caffe_cpu_asum() / caffe_gpu_asum() from math_functions.hpp.
template <typename Dtype>
Dtype Blob<Dtype>::asum_diff() const {
  if (!diff_) { return 0; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    return caffe_cpu_asum(count_, cpu_diff());
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
  {
    Dtype asum;
    caffe_gpu_asum(count_, gpu_diff(), &asum);
    return asum;
  }
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
  return 0;
}

template <> unsigned int Blob<unsigned int>::sumsq_data() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <> int Blob<int>::sumsq_data() const {
  NOT_IMPLEMENTED;
  return 0;
}

// Sum of squares of the data (the squared L2 norm), via caffe_cpu_dot() /
// caffe_gpu_dot() from math_functions.hpp.
template <typename Dtype>
Dtype Blob<Dtype>::sumsq_data() const {
  Dtype sumsq;
  const Dtype* data;
  if (!data_) { return 0; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    data = cpu_data();
    sumsq = caffe_cpu_dot(count_, data, data);
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    data = gpu_data();
    caffe_gpu_dot(count_, data, data, &sumsq);
#else
    NO_GPU;
#endif
    break;
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return sumsq;
}

template <> unsigned int Blob<unsigned int>::sumsq_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}

template <> int Blob<int>::sumsq_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}

// Sum of squares of the diff (the squared L2 norm), via caffe_cpu_dot() /
// caffe_gpu_dot() from math_functions.hpp.
template <typename Dtype>
Dtype Blob<Dtype>::sumsq_diff() const {
  Dtype sumsq;
  const Dtype* diff;
  if (!diff_) { return 0; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    diff = cpu_diff();
    sumsq = caffe_cpu_dot(count_, diff, diff);
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    diff = gpu_diff();
    caffe_gpu_dot(count_, diff, diff, &sumsq);
    break;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
  return sumsq;
}

template <> void Blob<unsigned int>::scale_data(unsigned int scale_factor) {
  NOT_IMPLEMENTED;
}

template <> void Blob<int>::scale_data(int scale_factor) {
  NOT_IMPLEMENTED;
}

// Multiply the data by a constant factor scale_factor.
template <typename Dtype>
void Blob<Dtype>::scale_data(Dtype scale_factor) {
  Dtype* data;
  if (!data_) { return; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    data = mutable_cpu_data();
    caffe_scal(count_, scale_factor, data);
    return;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    data = mutable_gpu_data();
    caffe_gpu_scal(count_, scale_factor, data);
    return;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
}

template <> void Blob<unsigned int>::scale_diff(unsigned int scale_factor) {
  NOT_IMPLEMENTED;
}

template <> void Blob<int>::scale_diff(int scale_factor) {
  NOT_IMPLEMENTED;
}

// Multiply the diff by a constant factor scale_factor.
template <typename Dtype>
void Blob<Dtype>::scale_diff(Dtype scale_factor) {
  Dtype* diff;
  if (!diff_) { return; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    diff = mutable_cpu_diff();
    caffe_scal(count_, scale_factor, diff);
    return;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    diff = mutable_gpu_diff();
    caffe_gpu_scal(count_, scale_factor, diff);
    return;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
}

// Check whether the two blobs have the same shape.
template <typename Dtype>
bool Blob<Dtype>::ShapeEquals(const BlobProto& other) {
  if (other.has_num() || other.has_channels() ||
      other.has_height() || other.has_width()) {
    // Using deprecated 4D Blob dimensions --
    // shape is (num, channels, height, width).
    // Note: we do not use the normal Blob::num(), Blob::channels(), etc.
    // methods as these index from the beginning of the blob shape, where legacy
    // parameter blobs were indexed from the end of the blob shape (e.g., bias
    // Blob shape (1 x 1 x 1 x N), IP layer weight Blob shape (1 x 1 x M x N)).
    return shape_.size() <= 4 &&
           LegacyShape(-4) == other.num() &&
           LegacyShape(-3) == other.channels() &&
           LegacyShape(-2) == other.height() &&
           LegacyShape(-1) == other.width();
  }
  // Otherwise compare the new-style shape directly.
  vector<int> other_shape(other.shape().dim_size());
  for (int i = 0; i < other.shape().dim_size(); ++i) {
    other_shape[i] = other.shape().dim(i);
  }
  return shape_ == other_shape;
}

// Copy from another blob.
template <typename Dtype>
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  if (source.count() != count_ || source.shape() != shape_) {
    if (reshape) {
      ReshapeLike(source);  // copy the shape first
    } else {
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  switch (Caffe::mode()) {
  case Caffe::GPU:
    // GPU copy. Note caffe_copy<float> on the CPU is just
    // cblas_scopy(N, X, 1, Y, 1).
    if (copy_diff) {
      caffe_copy(count_, source.gpu_diff(),
          static_cast<Dtype*>(diff_->mutable_gpu_data()));
    } else {
      caffe_copy(count_, source.gpu_data(),
          static_cast<Dtype*>(data_->mutable_gpu_data()));
    }
    break;
  case Caffe::CPU:
    // CPU copy.
    if (copy_diff) {
      caffe_copy(count_, source.cpu_diff(),
          static_cast<Dtype*>(diff_->mutable_cpu_data()));
    } else {
      caffe_copy(count_, source.cpu_data(),
          static_cast<Dtype*>(data_->mutable_cpu_data()));
    }
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}

// Load the blob from a BlobProto message defined in caffe.proto.
template <typename Dtype>
void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
  if (reshape) {
    vector<int> shape;
    if (proto.has_num() || proto.has_channels() ||
        proto.has_height() || proto.has_width()) {
      // Using deprecated 4D Blob dimensions --
      // shape is (num, channels, height, width).
      // Convert the legacy 4D blob directly into the new shape vector.
      shape.resize(4);
      shape[0] = proto.num();
      shape[1] = proto.channels();
      shape[2] = proto.height();
      shape[3] = proto.width();
    } else {
      shape.resize(proto.shape().dim_size());
      for (int i = 0; i < proto.shape().dim_size(); ++i) {
        shape[i] = proto.shape().dim(i);
      }
    }
    Reshape(shape);  // apply the shape to this blob
  } else {
    CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)";
  }
  // copy data
  Dtype* data_vec = mutable_cpu_data();  // mutable CPU pointer for the data
  if (proto.double_data_size() > 0) {
    CHECK_EQ(count_, proto.double_data_size());
    for (int i = 0; i < count_; ++i) {
      data_vec[i] = proto.double_data(i);
    }
  } else {
    CHECK_EQ(count_, proto.data_size());
    for (int i = 0; i < count_; ++i) {
      data_vec[i] = proto.data(i);
    }
  }
  if (proto.double_diff_size() > 0) {
    CHECK_EQ(count_, proto.double_diff_size());
    Dtype* diff_vec = mutable_cpu_diff();  // mutable CPU pointer for the diff
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.double_diff(i);
    }
  } else if (proto.diff_size() > 0) {
    CHECK_EQ(count_, proto.diff_size());
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }
  }
}

// Serialize the blob into a proto message (double specialization).
template <>
void Blob<double>::ToProto(BlobProto* proto, bool write_diff) const {
  proto->clear_shape();
  for (int i = 0; i < shape_.size(); ++i) {
    proto->mutable_shape()->add_dim(shape_[i]);
  }
  proto->clear_double_data();
  proto->clear_double_diff();
  const double* data_vec = cpu_data();
  for (int i = 0; i < count_; ++i) {
    proto->add_double_data(data_vec[i]);  // write the data into the proto
  }
  if (write_diff) {
    const double* diff_vec = cpu_diff();
    for (int i = 0; i < count_; ++i) {
      proto->add_double_diff(diff_vec[i]);  // write the diff into the proto
    }
  }
}

// Serialize the blob into a proto message (float specialization).
template <>
void Blob<float>::ToProto(BlobProto* proto, bool write_diff) const {
  proto->clear_shape();
  for (int i = 0; i < shape_.size(); ++i) {
    proto->mutable_shape()->add_dim(shape_[i]);
  }
  proto->clear_data();
  proto->clear_diff();
  const float* data_vec = cpu_data();
  for (int i = 0; i < count_; ++i) {
    proto->add_data(data_vec[i]);
  }
  if (write_diff) {
    const float* diff_vec = cpu_diff();
    for (int i = 0; i < count_; ++i) {
      proto->add_diff(diff_vec[i]);
    }
  }
}

INSTANTIATE_CLASS(Blob);
template class Blob<int>;
template class Blob<unsigned int>;

}  // namespace caffe
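To put Update() in context: the solver writes the already-scaled gradient into diff_, and Update() then applies data = data - diff via axpy with alpha = -1, on whichever device currently holds the data. Here is an end-to-end sketch of my own (not from the Caffe sources), using only the public Blob API shown above and assuming a CPU build:

#include <iostream>
#include <vector>
#include "caffe/blob.hpp"

int main() {
  std::vector<int> shape(1, 4);   // a 1-D parameter blob of 4 weights
  caffe::Blob<float> param(shape);

  float* w = param.mutable_cpu_data();
  float* g = param.mutable_cpu_diff();
  for (int i = 0; i < param.count(); ++i) {
    w[i] = 1.0f;   // pretend these are learned weights
    g[i] = 0.1f;   // pretend the solver stored lr * gradient here
  }

  param.Update();  // data := data - diff, i.e. one descent step

  std::cout << param.cpu_data()[0] << std::endl;  // 0.9
  std::cout << param.asum_diff() << std::endl;    // diff is untouched: 0.4
  return 0;
}

Note that Update() does not clear the diff; zeroing or re-filling the gradient between steps is the solver's job.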