A Detailed Annotation of Caffe's Net::Init() Function
In Caffe, the net initialization function Net::Init() is the key function in constructing the entire network. This post works through it in detail.
Part 1: Overview of the code
Init() is organized around the following helper calls:
1. FilterNet(in_param, &filtered_param);
This function removes layers from the model definition file (*.prototxt) that do not match the current state rules. For example, in the LeNet network under Caffe's examples/mnist, if the net is used only for the forward pass (the TEST phase), the data layer whose include rule names the TRAIN phase must be filtered out. That layer looks like this:
layer {
  name: "mnist"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  transform_param {
    scale: 0.00390625
  }
  data_param {
    source: "examples/mnist/mnist_train_lmdb"
    batch_size: 64
    backend: LMDB
  }
}
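For intuition, here is a minimal, self-contained C++ sketch of the include/exclude rule check that FilterNet applies per layer. The toy structs stand in for Caffe's NetState/NetStateRule protos, and the real rules also match the state's level and stages, so treat this as an illustration rather than Caffe's actual implementation:

#include <iostream>
#include <vector>

// Toy stand-ins for the NetState/NetStateRule protos (illustration only).
enum Phase { TRAIN, TEST };
struct NetState { Phase phase; };
struct NetStateRule { Phase phase; };
struct LayerParameter {
  std::vector<NetStateRule> include;
  std::vector<NetStateRule> exclude;
};

// Simplified rule check: the real StateMeetsRule also compares the
// state's level and stages against the rule.
bool StateMeetsRuleSketch(const NetState& state, const NetStateRule& rule) {
  return rule.phase == state.phase;
}

// A layer is kept if it has no include rules and no exclude rule matches,
// or if at least one of its include rules matches the current state.
bool LayerIncludedSketch(const NetState& state, const LayerParameter& layer) {
  bool included = layer.include.empty();  // no include rules: keep by default
  for (const NetStateRule& r : layer.exclude)
    if (StateMeetsRuleSketch(state, r)) included = false;
  for (const NetStateRule& r : layer.include)
    if (StateMeetsRuleSketch(state, r)) included = true;
  return included;
}

int main() {
  NetState test_state{TEST};
  LayerParameter data_layer;
  data_layer.include.push_back(NetStateRule{TRAIN});  // the layer shown above
  // Prints "false": the TRAIN-only data layer is dropped in the TEST phase.
  std::cout << std::boolalpha
            << LayerIncludedSketch(test_state, data_layer) << std::endl;
}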
2. InsertSplits(filtered_param, &param);
When one output blob of a lower layer feeds several higher layers, this function inserts a split layer, producing a new network definition. The main reason for this is that the gradients propagated back to that blob by its several consumers must be accumulated.
For example, in LeNet the data layer's top blob label feeds two layers, the accuracy layer and the loss layer, so a layer has to be inserted just above the data layer. A new layer, label_mnist_1_split, is created there, and two top blobs are created for it: label_mnist_1_split_0 and label_mnist_1_split_1, as sketched below.
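The original figure is lost; in prototxt form, the effective inserted layer would look roughly like the following (the layer and blob names come from the description above, but the actual definition is generated internally by InsertSplits, so this is only illustrative):

layer {
  name: "label_mnist_1_split"
  type: "Split"
  bottom: "label"
  top: "label_mnist_1_split_0"
  top: "label_mnist_1_split_1"
}

The accuracy layer then consumes label_mnist_1_split_0 and the loss layer consumes label_mnist_1_split_1, so each consumer gets its own top blob whose diffs the split layer sums into the bottom blob's gradient.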
3. layers_.push_back();
This line creates the concrete layer object as a shared_ptr<Layer<Dtype>> from the current layer's parameters, via LayerRegistry<Dtype>::CreateLayer(layer_param), and pushes it into layers_.
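CreateLayer is a classic registry/factory lookup: a map from the layer's type string to a creator function. Below is a simplified, self-contained sketch of that pattern; the toy Layer/LayerParameter types and the manual registration in main() are stand-ins for Caffe's real templates and its REGISTER_LAYER_CLASS machinery:

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

// Toy stand-ins for Caffe types (illustration only).
struct LayerParameter { std::string type; std::string name; };

struct Layer {
  virtual ~Layer() {}
  virtual std::string type() const = 0;
};

struct ConvolutionLayer : Layer {
  explicit ConvolutionLayer(const LayerParameter&) {}
  std::string type() const override { return "Convolution"; }
};

// Simplified registry: maps a type string to a creator function,
// mirroring what LayerRegistry<Dtype>::CreateLayer does in Caffe.
class LayerRegistry {
 public:
  using Creator = std::function<std::shared_ptr<Layer>(const LayerParameter&)>;

  static std::map<std::string, Creator>& Registry() {
    static std::map<std::string, Creator> registry;
    return registry;
  }

  static std::shared_ptr<Layer> CreateLayer(const LayerParameter& param) {
    // Look up the creator registered for this layer type and invoke it.
    return Registry().at(param.type)(param);
  }
};

int main() {
  // In Caffe this registration is done by the REGISTER_LAYER_CLASS macro.
  LayerRegistry::Registry()["Convolution"] =
      [](const LayerParameter& p) { return std::make_shared<ConvolutionLayer>(p); };

  LayerParameter param{"Convolution", "conv1"};
  std::shared_ptr<Layer> layer = LayerRegistry::CreateLayer(param);
  std::cout << "created layer of type " << layer->type() << std::endl;
}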
4. AppendBottom();
This function sets up the layer's bottom blobs. Because the network is built by stacking layers, the current layer's input bottom blob is just the previous layer's output top blob. Hence this function does not actually create a blob; it only pushes a pointer to the blob created for the previous layer into bottom_vecs_. (A combined sketch of the AppendBottom/AppendTop wiring follows item 5.)
5. AppendTop();
This function creates the layer's top blobs; it is here that a Blob object really gets new'd. The pointer to the top blob is also pushed into top_vecs_.
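To make the ownership story concrete, here is a self-contained toy sketch of the two steps together. The member names mirror those in Net (blobs_, blob_name_to_idx, top_vecs_, bottom_vecs_), but the blob type and function bodies are simplified stand-ins, not Caffe's actual code:

#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

// Toy Blob: just enough to show ownership vs. wiring (illustration only).
struct Blob { std::string shape_string; };

// Simplified net state mirroring the members used by AppendTop/AppendBottom.
std::vector<std::shared_ptr<Blob>> blobs_;     // owns every blob in the net
std::map<std::string, int> blob_name_to_idx;   // blob name -> index in blobs_
std::vector<std::vector<Blob*>> top_vecs_;     // per-layer raw pointers (outputs)
std::vector<std::vector<Blob*>> bottom_vecs_;  // per-layer raw pointers (inputs)

// AppendTop-like step: allocate a brand-new blob and register it.
void AppendTopSketch(int layer_id, const std::string& blob_name) {
  std::shared_ptr<Blob> blob = std::make_shared<Blob>();  // the real "new"
  blob_name_to_idx[blob_name] = static_cast<int>(blobs_.size());
  blobs_.push_back(blob);
  top_vecs_[layer_id].push_back(blob.get());
}

// AppendBottom-like step: no allocation, just reuse the producer's blob.
void AppendBottomSketch(int layer_id, const std::string& blob_name) {
  Blob* existing = blobs_[blob_name_to_idx.at(blob_name)].get();
  bottom_vecs_[layer_id].push_back(existing);
}

int main() {
  top_vecs_.resize(2);
  bottom_vecs_.resize(2);
  AppendTopSketch(0, "data");     // layer 0 produces "data"
  AppendBottomSketch(1, "data");  // layer 1 consumes the very same blob
  std::cout << std::boolalpha
            << (top_vecs_[0][0] == bottom_vecs_[1][0]) << std::endl;  // true
}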
6. layers_[layer_id]->SetUp();
The steps above created the concrete layer and wired up its input bottom blobs and output top blobs. This line brings the layer to life: SetUp() allocates the data memory behind the created blobs and, where necessary, adjusts the shapes of the layer's input bottom and output top blobs.
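SetUp() follows a template-method structure in layer.hpp. The sketch below keeps the four step names, which do exist on Caffe's Layer class, but uses toy types and simplified bodies, so read it as an outline rather than the exact source:

#include <cstddef>
#include <vector>

// Toy blob type (illustration only).
struct Blob { std::vector<int> shape; int count; };

// Sketch of the fixed call sequence Layer<Dtype>::SetUp runs in Caffe.
class Layer {
 public:
  void SetUp(const std::vector<Blob*>& bottom, const std::vector<Blob*>& top) {
    CheckBlobCounts(bottom, top);  // validate bottom/top counts for this type
    LayerSetUp(bottom, top);       // one-time, layer-specific initialization
    Reshape(bottom, top);          // size the top blobs (memory is set up here)
    SetLossWeights(top);           // attach loss weights to top blobs, if any
  }
  virtual ~Layer() {}

 protected:
  virtual void CheckBlobCounts(const std::vector<Blob*>&,
                               const std::vector<Blob*>&) {}
  virtual void LayerSetUp(const std::vector<Blob*>&,
                          const std::vector<Blob*>&) {}
  virtual void Reshape(const std::vector<Blob*>& bottom,
                       const std::vector<Blob*>& top) = 0;
  virtual void SetLossWeights(const std::vector<Blob*>&) {}
};

// Hypothetical concrete layer, only to make the sketch executable.
struct PassThroughLayerSketch : Layer {
 protected:
  void Reshape(const std::vector<Blob*>& bottom,
               const std::vector<Blob*>& top) override {
    for (std::size_t i = 0; i < top.size(); ++i) *top[i] = *bottom[i];
  }
};

int main() {
  Blob in{{64, 1, 28, 28}, 64 * 28 * 28}, out{};
  std::vector<Blob*> bottom{&in}, top{&out};
  PassThroughLayerSketch layer;
  layer.SetUp(bottom, top);  // runs all four steps in order
}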
7. AppendParam();
Some layers have learnable parameters; for example, convolution layers and fully connected layers have weights and biases. This function mainly updates the bookkeeping around those parameters; the parameter blobs themselves were already created in the SetUp() call above. For instance, it pushes pointers to the layer's parameter blobs into params_.
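Caffe also lets two layers share one parameter blob by giving the param the same non-empty name in both layers, and AppendParam is where that sharing is resolved. The sketch below captures just that idea with toy types; params_ and param_names_index_ mirror Net's members in spirit only, and the real function additionally validates shapes and tracks owner indices:

#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

// Toy parameter blob (illustration only).
struct Blob { std::vector<float> data; };

// Net-like bookkeeping: named params are shared, anonymous ones are owned.
std::vector<std::shared_ptr<Blob>> params_;
std::map<std::string, int> param_names_index_;

// AppendParam-like step: register a layer's param blob. If a param with the
// same non-empty name was registered before, reuse (share) that blob.
std::shared_ptr<Blob> AppendParamSketch(const std::string& param_name,
                                        std::shared_ptr<Blob> layer_blob) {
  if (!param_name.empty() && param_names_index_.count(param_name)) {
    return params_[param_names_index_[param_name]];  // share the existing blob
  }
  if (!param_name.empty()) {
    param_names_index_[param_name] = static_cast<int>(params_.size());
  }
  params_.push_back(layer_blob);  // first occurrence: this layer owns the blob
  return layer_blob;
}

int main() {
  auto w1 = AppendParamSketch("shared_w", std::make_shared<Blob>());
  auto w2 = AppendParamSketch("shared_w", std::make_shared<Blob>());
  std::cout << std::boolalpha << (w1 == w2) << std::endl;  // true: shared
}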
Part 2: A detailed annotation of the Net::Init() source
The annotations appear inline as comments in the code below.
template <typename Dtype>
void Net<Dtype>::Init(const NetParameter& in_param) {
  // In multi-GPU training, only the root solver builds layers itself;
  // every non-root solver must have root_net_ set so it can share them.
  CHECK(Caffe::root_solver() || root_net_)
      << "root_net_ needs to be set for all non-root solvers";
  // Set the phase (TRAIN or TEST) from the current network state.
  phase_ = in_param.state().phase();
  // Filter out layers whose include/exclude rules do not match the
  // current state (see item 1 above).
  NetParameter filtered_param;
  FilterNet(in_param, &filtered_param);
  LOG_IF(INFO, Caffe::root_solver())
      << "Initializing net from parameters: " << std::endl
      << filtered_param.DebugString();
  // Insert split layers wherever one top blob feeds several layers
  // (see item 2 above); param is the final network definition.
  NetParameter param;
  InsertSplits(filtered_param, &param);
  // Basic bookkeeping: net name, helper maps, memory counter.
  name_ = param.name();
  map<string, int> blob_name_to_idx;  // blob name -> index in blobs_
  set<string> available_blobs;        // blobs produced but not yet consumed
  memory_used_ = 0;
  // Reserve a per-layer slot for bottom/top blob pointers and ids.
  bottom_vecs_.resize(param.layer_size());
  top_vecs_.resize(param.layer_size());
  bottom_id_vecs_.resize(param.layer_size());
  param_id_vecs_.resize(param.layer_size());
  top_id_vecs_.resize(param.layer_size());
  bottom_need_backward_.resize(param.layer_size());
  // Build the network layer by layer.
  for (int layer_id = 0; layer_id < param.layer_size(); ++layer_id) {
    // Non-root solvers share parallel-safe layers from the root net
    // instead of constructing their own copies.
    bool share_from_root = !Caffe::root_solver()
        && root_net_->layers_[layer_id]->ShareInParallel();
    // A layer that does not set its own phase inherits the net's phase.
    if (!param.layer(layer_id).has_phase()) {
      param.mutable_layer(layer_id)->set_phase(phase_);
    }
    // If propagate_down is given, it must be given once per bottom blob.
    const LayerParameter& layer_param = param.layer(layer_id);
    if (layer_param.propagate_down_size() > 0) {
      CHECK_EQ(layer_param.propagate_down_size(),
          layer_param.bottom_size())
          << "propagate_down param must be specified "
          << "either 0 or bottom_size times ";
    }
    if (share_from_root) {
      LOG(INFO) << "Sharing layer " << layer_param.name() << " from root net";
      layers_.push_back(root_net_->layers_[layer_id]);
      layers_[layer_id]->SetShared(true);
    } else {
      // Create the concrete layer object through the layer registry and
      // push it into layers_ (see item 3 above).
      layers_.push_back(LayerRegistry<Dtype>::CreateLayer(layer_param));
    }
    layer_names_.push_back(layer_param.name());
    LOG_IF(INFO, Caffe::root_solver())
        << "Creating Layer " << layer_param.name();
    bool need_backward = false;
    // Wire up this layer's bottom blobs: AppendBottom does not allocate,
    // it reuses the top blobs of earlier layers (see item 4 above). The
    // layer needs backward if any of its bottom blobs does.
    for (int bottom_id = 0; bottom_id < layer_param.bottom_size();
        ++bottom_id) {
      const int blob_id = AppendBottom(param, layer_id, bottom_id,
          &available_blobs, &blob_name_to_idx);
      need_backward |= blob_need_backward_[blob_id];
    }
    // Create this layer's top blobs: AppendTop allocates new Blob objects
    // (see item 5 above).
    int num_top = layer_param.top_size();
    for (int top_id = 0; top_id < num_top; ++top_id) {
      AppendTop(param, layer_id, top_id, &available_blobs, &blob_name_to_idx);
      // Collect the blobs of Input layers as the net's input blobs.
      if (layer_param.type() == "Input") {
        const int blob_id = blobs_.size() - 1;
        net_input_blob_indices_.push_back(blob_id);
        net_input_blobs_.push_back(blobs_[blob_id].get());
      }
    }
    // If the layer auto-creates top blobs (AutoTopBlobs), add anonymous
    // tops until the required minimum or exact count is reached.
    Layer<Dtype>* layer = layers_[layer_id].get();
    if (layer->AutoTopBlobs()) {
      const int needed_num_top =
          std::max(layer->MinTopBlobs(), layer->ExactNumTopBlobs());
      for (; num_top < needed_num_top; ++num_top) {
        // Anonymous tops are not registered in blob_name_to_idx or
        // available_blobs, so other layers cannot use them as bottoms.
        AppendTop(param, layer_id, num_top, NULL, NULL);
      }
    }
    if (share_from_root) {
      // Shared layers were already set up by the root net; just mirror
      // the root's top blob shapes.
      const vector<Blob<Dtype>*>& base_top = root_net_->top_vecs_[layer_id];
      const vector<Blob<Dtype>*>& this_top = this->top_vecs_[layer_id];
      for (int top_id = 0; top_id < base_top.size(); ++top_id) {
        this_top[top_id]->ReshapeLike(*base_top[top_id]);
        LOG(INFO) << "Created top blob " << top_id << " (shape: "
            << this_top[top_id]->shape_string() << ") for shared layer "
            << layer_param.name();
      }
    } else {
      // Set up the layer: allocate blob memory and reshape as needed
      // (see item 6 above).
      layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]);
    }
    LOG_IF(INFO, Caffe::root_solver())
        << "Setting up " << layer_names_[layer_id];
    // Record the loss weight of each top blob and accumulate the memory
    // the net needs for its data.
    for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) {
      // Grow blob_loss_weights_ on demand; new entries default to 0.
      if (blob_loss_weights_.size() <= top_id_vecs_[layer_id][top_id]) {
        blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0));
      }
      // loss(top_id) is nonzero only for loss-producing top blobs.
      blob_loss_weights_[top_id_vecs_[layer_id][top_id]] = layer->loss(top_id);
      LOG_IF(INFO, Caffe::root_solver())
          << "Top shape: " << top_vecs_[layer_id][top_id]->shape_string();
      if (layer->loss(top_id)) {
        LOG_IF(INFO, Caffe::root_solver())
            << " with loss weight " << layer->loss(top_id);
      }
      memory_used_ += top_vecs_[layer_id][top_id]->count();
    }
    LOG_IF(INFO, Caffe::root_solver())
        << "Memory required for data: " << memory_used_ * sizeof(Dtype);
    // Handle the layer's learnable parameter blobs. param_size is the
    // number of ParamSpec entries (lr_mult, decay_mult, ...) given in the
    // prototxt; num_param_blobs is the number of parameter blobs the layer
    // actually owns (created in SetUp above). param_size may be smaller,
    // in which case defaults apply, but never larger.
    const int param_size = layer_param.param_size();
    const int num_param_blobs = layers_[layer_id]->blobs().size();
    CHECK_LE(param_size, num_param_blobs)
        << "Too many params specified for layer " << layer_param.name();
    ParamSpec default_param_spec;
    for (int param_id = 0; param_id < num_param_blobs; ++param_id) {
      const ParamSpec* param_spec = (param_id < param_size) ?
          &layer_param.param(param_id) : &default_param_spec;
      // A parameter with lr_mult == 0 is frozen: it needs no gradient.
      const bool param_need_backward = param_spec->lr_mult() != 0;
      need_backward |= param_need_backward;
      layers_[layer_id]->set_param_propagate_down(param_id,
                                                  param_need_backward);
    }
    // Register each parameter blob with the net (see item 7 above).
    for (int param_id = 0; param_id < num_param_blobs; ++param_id) {
      AppendParam(param, layer_id, param_id);
    }
    // Finally, record whether this layer needs backward; if so, all of
    // its top blobs need backward as well.
    layer_need_backward_.push_back(need_backward);
    if (need_backward) {
      for (int top_id = 0; top_id < top_id_vecs_[layer_id].size(); ++top_id) {
        blob_need_backward_[top_id_vecs_[layer_id][top_id]] = true;
      }
    }
  }
  // Go backwards through the net to determine which blobs actually
  // contribute to the loss, and hence which layers really need backward
  // computation. A layer can be skipped if none of its tops reaches a
  // loss, or if propagate_down was explicitly disabled on all its paths.
  // blobs_under_loss: blobs on some path to a loss output.
  // blobs_skip_backp: blobs whose consumers all skip backpropagation.
  set<string> blobs_under_loss;
  set<string> blobs_skip_backp;
  for (int layer_id = layers_.size() - 1; layer_id >= 0; --layer_id) {
    bool layer_contributes_loss = false;
    bool layer_skip_propagate_down = true;
    // A layer contributes to the loss if one of its tops is a loss output
    // or lies under a loss; it may skip propagate_down only if every top
    // is marked as skippable.
    for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) {
      const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]];
      if (layers_[layer_id]->loss(top_id) ||
          (blobs_under_loss.find(blob_name) != blobs_under_loss.end())) {
        layer_contributes_loss = true;
      }
      if (blobs_skip_backp.find(blob_name) == blobs_skip_backp.end()) {
        layer_skip_propagate_down = false;
      }
      if (layer_contributes_loss && !layer_skip_propagate_down)
        break;
    }
    // If the layer would need backward but all paths skip propagation,
    // turn backward off for the layer and all of its bottoms.
    if (layer_need_backward_[layer_id] && layer_skip_propagate_down) {
      layer_need_backward_[layer_id] = false;
      for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size();
          ++bottom_id) {
        bottom_need_backward_[layer_id][bottom_id] = false;
      }
    }
    if (!layer_contributes_loss) { layer_need_backward_[layer_id] = false; }
    if (Caffe::root_solver()) {
      if (layer_need_backward_[layer_id]) {
        LOG(INFO) << layer_names_[layer_id] << " needs backward computation.";
      } else {
        LOG(INFO) << layer_names_[layer_id]
            << " does not need backward computation.";
      }
    }
    // Propagate the two sets downwards: bottoms of loss-contributing
    // layers are under loss; bottoms needing no backward are skippable.
    for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size();
        ++bottom_id) {
      if (layer_contributes_loss) {
        const string& blob_name =
            blob_names_[bottom_id_vecs_[layer_id][bottom_id]];
        blobs_under_loss.insert(blob_name);
      } else {
        bottom_need_backward_[layer_id][bottom_id] = false;
      }
      if (!bottom_need_backward_[layer_id][bottom_id]) {
        const string& blob_name =
            blob_names_[bottom_id_vecs_[layer_id][bottom_id]];
        blobs_skip_backp.insert(blob_name);
      }
    }
  }
  // force_backward overrides the analysis above: every layer computes
  // backward wherever its layer type allows it.
  if (param.force_backward()) {
    for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) {
      layer_need_backward_[layer_id] = true;
      for (int bottom_id = 0;
          bottom_id < bottom_need_backward_[layer_id].size(); ++bottom_id) {
        bottom_need_backward_[layer_id][bottom_id] =
            bottom_need_backward_[layer_id][bottom_id] ||
            layers_[layer_id]->AllowForceBackward(bottom_id);
        blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] =
            blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] ||
            bottom_need_backward_[layer_id][bottom_id];
      }
      for (int param_id = 0; param_id < layers_[layer_id]->blobs().size();
          ++param_id) {
        layers_[layer_id]->set_param_propagate_down(param_id, true);
      }
    }
  }
  // Any blob still left in available_blobs was never consumed by a layer,
  // so it is an output of the whole network.
  for (set<string>::iterator it = available_blobs.begin();
      it != available_blobs.end(); ++it) {
    LOG_IF(INFO, Caffe::root_solver())
        << "This network produces output " << *it;
    net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get());
    net_output_blob_indices_.push_back(blob_name_to_idx[*it]);
  }
  // Build name -> index lookup tables for blobs and layers.
  for (size_t blob_id = 0; blob_id < blob_names_.size(); ++blob_id) {
    blob_names_index_[blob_names_[blob_id]] = blob_id;
  }
  for (size_t layer_id = 0; layer_id < layer_names_.size(); ++layer_id) {
    layer_names_index_[layer_names_[layer_id]] = layer_id;
  }
  // Share weights among layers that declared params with the same name.
  ShareWeights();
  debug_info_ = param.debug_info();
  LOG_IF(INFO, Caffe::root_solver()) << "Network initialization done.";
}