(二)caffe 网络训练执行流程
来源:互联网 发布:大数据营销案例沃尔玛 编辑:程序博客网 时间:2024/05/22 15:41
暂时提交
caffe训练网络的执行脚本如下(例如运行lenet)
./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt
在Qt中运行的时候,参考http://blog.csdn.net/qyczyr/article/details/70216300
程序执行入口
tools/caffe.cpp 中包含了程序的入口函数 main()。
#ifndef CAFFE_SOLVER_HPP_#define CAFFE_SOLVER_HPP_#include <boost/function.hpp>#include <string>#include <vector>#include "caffe/net.hpp"#include "caffe/solver_factory.hpp"namespace caffe { namespace SolverAction { enum Enum { NONE = 0, // Take no special action. STOP = 1, // Stop training. snapshot_after_train controls whether a // snapshot is created. SNAPSHOT = 2 // Take a snapshot, and keep training. }; }/** * @brief Type of a function that returns a Solver Action enumeration. */typedef boost::function<SolverAction::Enum()> ActionCallback;/** * @brief An interface for classes that perform optimization on Net%s. * * Requires implementation of ApplyUpdate to compute a parameter update * given the current state of the Net parameters. */template <typename Dtype>class Solver { public: explicit Solver(const SolverParameter& param, const Solver* root_solver = NULL); explicit Solver(const string& param_file, const Solver* root_solver = NULL); void Init(const SolverParameter& param); void InitTrainNet(); void InitTestNets(); // Client of the Solver optionally may call this in order to set the function // that the solver uses to see what action it should take (e.g. snapshot or // exit training early). void SetActionFunction(ActionCallback func); SolverAction::Enum GetRequestedAction(); // The main entry of the solver function. In default, iter will be zero. Pass // in a non-zero iter number to resume training for a pre-trained net. virtual void Solve(const char* resume_file = NULL); inline void Solve(const string resume_file) { Solve(resume_file.c_str()); } void Step(int iters); // The Restore method simply dispatches to one of the // RestoreSolverStateFrom___ protected methods. You should implement these // methods to restore the state from the appropriate snapshot type. void Restore(const char* resume_file); // The Solver::Snapshot function implements the basic snapshotting utility // that stores the learned net. 
You should implement the SnapshotSolverState() // function that produces a SolverState protocol buffer that needs to be // written to disk together with the learned net. void Snapshot(); virtual ~Solver() {} inline const SolverParameter& param() const { return param_; } inline shared_ptr<Net<Dtype> > net() { return net_; } inline const vector<shared_ptr<Net<Dtype> > >& test_nets() { return test_nets_; } int iter() { return iter_; } // Invoked at specific points during an iteration class Callback { protected: virtual void on_start() = 0; virtual void on_gradients_ready() = 0; template <typename T> friend class Solver; }; const vector<Callback*>& callbacks() const { return callbacks_; } void add_callback(Callback* value) { callbacks_.push_back(value); } void CheckSnapshotWritePermissions(); /** * @brief Returns the solver type. */ virtual inline const char* type() const { return ""; } protected: // Make and apply the update value for the current iteration. virtual void ApplyUpdate() = 0; string SnapshotFilename(const string extension); string SnapshotToBinaryProto(); string SnapshotToHDF5(); // The test routine void TestAll(); void Test(const int test_net_id = 0); virtual void SnapshotSolverState(const string& model_filename) = 0; virtual void RestoreSolverStateFromHDF5(const string& state_file) = 0; virtual void RestoreSolverStateFromBinaryProto(const string& state_file) = 0; void DisplayOutputBlobs(const int net_id); void UpdateSmoothedLoss(Dtype loss, int start_iter, int average_loss); SolverParameter param_; int iter_; int current_step_; shared_ptr<Net<Dtype> > net_; vector<shared_ptr<Net<Dtype> > > test_nets_; vector<Callback*> callbacks_; vector<Dtype> losses_; Dtype smoothed_loss_; // The root solver that holds root nets (actually containing shared layers) // in data parallelism const Solver* const root_solver_; // A function that can be set by a client of the Solver to provide indication // that it wants a snapshot saved and/or to exit early. 
ActionCallback action_request_function_; // True iff a request to stop early was received. bool requested_early_exit_; DISABLE_COPY_AND_ASSIGN(Solver);};/** * @brief Solver that only computes gradients, used as worker * for multi-GPU training. */template <typename Dtype>class WorkerSolver : public Solver<Dtype> { public: explicit WorkerSolver(const SolverParameter& param, const Solver<Dtype>* root_solver = NULL) : Solver<Dtype>(param, root_solver) {} protected: void ApplyUpdate() {} void SnapshotSolverState(const string& model_filename) { LOG(FATAL) << "Should not be called on worker solver."; } void RestoreSolverStateFromBinaryProto(const string& state_file) { LOG(FATAL) << "Should not be called on worker solver."; } void RestoreSolverStateFromHDF5(const string& state_file) { LOG(FATAL) << "Should not be called on worker solver."; }};} // namespace caffe#endif // CAFFE_SOLVER_HPP_
0 0
- (二)caffe 网络训练执行流程
- (Caffe,LeNet)网络训练流程(二)
- (Caffe,LeNet)网络训练流程(二)
- caffe训练流程
- caffe训练主要流程
- caffe训练流程
- Caffe训练流程
- Caffe源码阅读(粗读)--网络训练
- caffe训练自己的数据(二)
- 调用caffe进行网络训练(caffe.cpp 解析)
- Caffe训练源码基本流程
- caffe训练分类完整流程
- Caffe训练源码基本流程
- Caffe分类网训练流程
- Caffe训练源码基本流程
- caffe学习-----3 .m文件流程(训练或者测试)
- py-faster-rcnn流程(5)——训练RPN网络二阶段
- py-faster-rcnn流程(6)——训练Fastrcnn网络二阶段
- 台湾李宏毅教授深度学习
- linux c 命令行解析
- Android开发经验(不积跬步无以至千里)
- The specified child already has a parent. You must call removeView() on the child's parent first.错误
- 【DP】洛谷 P1006 传纸条
- (二)caffe 网络训练执行流程
- JAVA设计模式之工厂模式(简单工厂模式+工厂方法模式)
- exit函数和return返回之间的区别
- hive 三种去重方式
- bash shell(2)
- Struts1和Struts2的区别和对比(完整版)
- 5个数求最值
- RobotFramwork
- 【个人笔记重点,不作为参考】主题:跨域通信