【Reading the Caffe Source, Part 4】solver.cpp && sgd_solver.cpp


In Caffe, the solver's job is to alternately call the forward and backward passes to update the network parameters and thereby minimize the loss; in essence it is an iterative optimization algorithm.

The Solver class in solver.cpp provides the entry point for model training. At the end of the train function in caffe.cpp it is invoked via solver->Solve():

template <typename Dtype>
void Solver<Dtype>::Solve(const char* resume_file) {
  // Check that this is the root_solver. With multiple GPUs several solvers
  // can run in parallel, one per GPU; the first one is the root_solver.
  CHECK(Caffe::root_solver());
  // Network name
  LOG(INFO) << "Solving " << net_->name();
  // Learning rate policy
  LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy();
  // Initialize to false every time we start solving.
  requested_early_exit_ = false;
  // If a resume file is given, restore the previous training state from it.
  if (resume_file) {
    LOG(INFO) << "Restoring previous solver status from " << resume_file;
    Restore(resume_file);
  }

  // For a network that is trained by the solver, no bottom or top vecs
  // should be given, and we will just provide dummy vecs.
  int start_iter = iter_;
  // Start the step-by-step iteration
  Step(param_.max_iter() - iter_);
  ……
}
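For context, the call site in caffe.cpp's train function looks roughly like the sketch below. It is simplified: multi-GPU setup, weight copying and signal handling are omitted, and solver_file / snapshot_file are illustrative stand-ins for the command-line flags.

#include <string>
#include <boost/shared_ptr.hpp>
#include "caffe/caffe.hpp"

// Rough sketch of how caffe.cpp's train() builds and drives the solver.
void TrainSketch(const std::string& solver_file,
                 const std::string& snapshot_file) {
  caffe::SolverParameter solver_param;
  caffe::ReadProtoFromTextFileOrDie(solver_file, &solver_param);
  boost::shared_ptr<caffe::Solver<float> > solver(
      caffe::SolverRegistry<float>::CreateSolver(solver_param));
  if (!snapshot_file.empty()) {
    // Resume from a previously saved .solverstate file.
    solver->Restore(snapshot_file.c_str());
  }
  solver->Solve();  // internally calls Step(param_.max_iter() - iter_)
}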


Solve first checks the execution mode, prints the network name and the learning rate policy, and decides whether a previous training state needs to be restored; it then calls Step. The Step function of the Solver class carries out the step-by-step optimization of the network model:

template <typename Dtype>
// Step performs the actual step-by-step iterative optimization.
void Solver<Dtype>::Step(int iters) {
  // Starting iteration count; when resuming from a snapshot, training
  // continues from the iteration stored in the snapshot.
  const int start_iter = iter_;
  // Iteration at which to stop
  const int stop_iter = iter_ + iters;
  // Number of recent iterations to average the loss over (default 1)
  int average_loss = this->param_.average_loss();
  // Clear the vector that stores the recent losses
  losses_.clear();
  // Initialize the smoothed (averaged) loss to 0
  smoothed_loss_ = 0;
  // Main iteration loop
  while (iter_ < stop_iter) {
    // Zero the gradients produced by the previous backward pass
    // zero-init the params
    net_->ClearParamDiffs();
    // Check whether all test nets should be run at this iteration
    if (param_.test_interval() && iter_ % param_.test_interval() == 0
        && (iter_ > 0 || param_.test_initialization())
        && Caffe::root_solver()) {
      TestAll();
      if (requested_early_exit_) {
        // Break out of the while loop because stop was requested while testing.
        break;
      }
    }

    for (int i = 0; i < callbacks_.size(); ++i) {
      callbacks_[i]->on_start();
    }
    // Whether to print the loss and other info at this iteration
    const bool display = param_.display() && iter_ % param_.display() == 0;
    net_->set_debug_info(display && param_.debug_info());
    // accumulate the loss and gradient
    Dtype loss = 0;
    // iter_size is set in solver.prototxt and splits one update into several
    // sub-batches; batch_size is defined in the net and gives the number of
    // samples per sub-batch. Accumulating gradients over sub-batches allows
    // an effective batch that would not fit in memory at once:
    // effective batch size = iter_size * batch_size.
    for (int i = 0; i < param_.iter_size(); ++i) {
      // Accumulate the loss over the sub-batches
      loss += net_->ForwardBackward();
    }
    // Average the loss over the sub-batches
    loss /= param_.iter_size();
    // Update the smoothed loss over the last average_loss iterations
    // average the loss across iterations for smoothed reporting
    UpdateSmoothedLoss(loss, start_iter, average_loss);
    if (display) {
      // Print the iteration count and the smoothed loss
      LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_
          << ", loss = " << smoothed_loss_;
      const vector<Blob<Dtype>*>& result = net_->output_blobs();
      int score_index = 0;
      for (int j = 0; j < result.size(); ++j) {
        const Dtype* result_vec = result[j]->cpu_data();
        const string& output_name =
            net_->blob_names()[net_->output_blob_indices()[j]];
        const Dtype loss_weight =
            net_->blob_loss_weights()[net_->output_blob_indices()[j]];
        for (int k = 0; k < result[j]->count(); ++k) {
          ostringstream loss_msg_stream;
          if (loss_weight) {
            loss_msg_stream << " (* " << loss_weight
                            << " = " << loss_weight * result_vec[k] << " loss)";
          }
          LOG_IF(INFO, Caffe::root_solver()) << "    Train net output #"
              << score_index++ << ": " << output_name << " = "
              << result_vec[k] << loss_msg_stream.str();
        }
      }
    }
    for (int i = 0; i < callbacks_.size(); ++i) {
      callbacks_[i]->on_gradients_ready();
    }
    // Apply the parameter update; each solver type implements its own
    // ApplyUpdate function.
    ApplyUpdate();

    // Increment the internal iter_ counter -- its value should always indicate
    // the number of times the weights have been updated.
    ++iter_;

    SolverAction::Enum request = GetRequestedAction();

    // Save a snapshot if needed.
    if ((param_.snapshot()
         && iter_ % param_.snapshot() == 0
         && Caffe::root_solver()) ||
         (request == SolverAction::SNAPSHOT)) {
      Snapshot();
    }
    if (SolverAction::STOP == request) {
      requested_early_exit_ = true;
      // Break out of training loop.
      break;
    }
  }
}
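The smoothed_loss_ value printed above is not the raw loss of the current iteration but a running average over the last average_loss iterations, maintained by UpdateSmoothedLoss. A standalone sketch of that bookkeeping (illustrative names, not the actual member function): keep the recent losses in a ring buffer and adjust their mean in O(1) per iteration.

#include <vector>

// Maintain the mean of the last `average_loss` losses.
void UpdateSmoothedLossSketch(float loss, int iter, int start_iter,
                              int average_loss,
                              std::vector<float>* losses,
                              float* smoothed_loss) {
  if (static_cast<int>(losses->size()) < average_loss) {
    // Window not full yet: append and recompute the running mean.
    losses->push_back(loss);
    const int size = static_cast<int>(losses->size());
    *smoothed_loss = (*smoothed_loss * (size - 1) + loss) / size;
  } else {
    // Window full: overwrite the oldest entry and adjust the mean.
    const int idx = (iter - start_iter) % average_loss;
    *smoothed_loss += (loss - (*losses)[idx]) / average_loss;
    (*losses)[idx] = loss;
  }
}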


A complete training step consists of one forward pass and one backward pass, which compute the model's loss and gradients respectively. The parameter update is then derived from the gradients and applied through the ApplyUpdate call inside Step; ApplyUpdate is defined in the SGDSolver class and shown below.
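Written out per parameter, the helper calls inside ApplyUpdate plus the final Net::Update amount to the following summary of the SGD-with-momentum update (η is the current learning rate from lr_policy, μ the momentum, λ the effective weight decay, g the accumulated gradient and v the momentum history):

    g ← g / iter_size                              (Normalize)
    g ← g + λ·w  (L2)   or   g + λ·sign(w)  (L1)   (Regularize)
    v ← μ·v + η·g                                  (ComputeUpdateValue)
    w ← w − v                                      (Net::Update)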

template <typename Dtype>
void SGDSolver<Dtype>::ApplyUpdate() {
  CHECK(Caffe::root_solver());
  // Compute the learning rate for the current iteration according to the
  // configured lr_policy.
  Dtype rate = GetLearningRate();
  // Optionally print the current iteration and learning rate
  if (this->param_.display() && this->iter_ % this->param_.display() == 0) {
    LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate;
  }
  // Guard against exploding gradients: if the L2 norm of the gradients
  // exceeds the configured threshold, scale the gradients down.
  ClipGradients();
  // Update all learnable parameters, e.g. the weights and biases of the
  // convolution and inner-product layers.
  for (int param_id = 0; param_id < this->net_->learnable_params().size();
       ++param_id) {
    // Normalize the gradient by dividing by iter_size, so the update matches
    // the effective batch_size = iter_size * batch_size.
    Normalize(param_id);
    // Add the regularization (weight decay) term to each parameter's gradient
    Regularize(param_id);
    // Compute the SGD update value (learning rate, momentum, etc.)
    ComputeUpdateValue(param_id, rate);
  }
  // Call Net::Update to apply the updates to the parameters
  this->net_->Update();
}

template <typename Dtype>
void SGDSolver<Dtype>::Normalize(int param_id) {
  // If there is only one sub-batch per iteration, no normalization is needed
  if (this->param_.iter_size() == 1) { return; }
  // Get all learnable parameters
  // Scale gradient to counterbalance accumulation.
  const vector<Blob<Dtype>*>& net_params = this->net_->learnable_params();
  // Normalization factor
  const Dtype accum_normalization = Dtype(1.) / this->param_.iter_size();
  switch (Caffe::mode()) {
  case Caffe::CPU: {
    // CPU implementation of the scaling
    caffe_scal(net_params[param_id]->count(), accum_normalization,
        net_params[param_id]->mutable_cpu_diff());
    break;
  }
  case Caffe::GPU: {
#ifndef CPU_ONLY
    caffe_gpu_scal(net_params[param_id]->count(), accum_normalization,
        net_params[param_id]->mutable_gpu_diff());
#else
    NO_GPU;
#endif
    break;
  }
  default:
    LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode();
  }
}

template <typename Dtype>
void SGDSolver<Dtype>::Regularize(int param_id) {
  // Get all learnable parameters
  const vector<Blob<Dtype>*>& net_params = this->net_->learnable_params();
  // Per-parameter weight-decay multipliers
  const vector<float>& net_params_weight_decay =
      this->net_->params_weight_decay();
  // Global weight decay of the model
  Dtype weight_decay = this->param_.weight_decay();
  // Regularization type of the network: L1 or L2
  string regularization_type = this->param_.regularization_type();
  // The effective decay of a parameter is its own multiplier times the
  // global weight decay.
  Dtype local_decay = weight_decay * net_params_weight_decay[param_id];
  switch (Caffe::mode()) {
  case Caffe::CPU: {
    if (local_decay) {  // a local_decay of 0 means no weight decay is applied
      if (regularization_type == "L2") {
        // add weight decay
        // L2 regularization: diff_ = local_decay * data_ + diff_
        caffe_axpy(net_params[param_id]->count(),
            local_decay,
            net_params[param_id]->cpu_data(),
            net_params[param_id]->mutable_cpu_diff());
      } else if (regularization_type == "L1") {
        // L1 regularization: diff_ = local_decay * sign(data_) + diff_
        caffe_cpu_sign(net_params[param_id]->count(),
            net_params[param_id]->cpu_data(),
            temp_[param_id]->mutable_cpu_data());
        caffe_axpy(net_params[param_id]->count(),
            local_decay,
            temp_[param_id]->cpu_data(),
            net_params[param_id]->mutable_cpu_diff());
      } else {
        LOG(FATAL) << "Unknown regularization type: " << regularization_type;
      }
    }
    break;
  }
  ……
}
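ComputeUpdateValue itself is not included in the excerpt above. For plain SGD it combines the already normalized and regularized gradient with the momentum history, and Net::Update then subtracts the result from the weights. A minimal standalone sketch of that combination (a hypothetical helper for illustration, not Caffe's API; local_rate stands for the global rate multiplied by the parameter's lr multiplier):

#include <vector>

// history = momentum * history + local_rate * diff;  weight = weight - history
void SgdMomentumUpdateSketch(std::vector<float>* weights,
                             const std::vector<float>& diffs,
                             std::vector<float>* history,
                             float local_rate, float momentum) {
  for (size_t i = 0; i < weights->size(); ++i) {
    (*history)[i] = momentum * (*history)[i] + local_rate * diffs[i];
    (*weights)[i] -= (*history)[i];
  }
}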

