caffe: unknown layer type/unknown solver type

来源:互联网 发布:凯撒豪庭骗局 知乎 编辑:程序博客网 时间:2024/04/30 19:18

Only for Windows

caffe 通过定义静态全局对象的形式(利用对象的构造函数),来创建一个静态类型的变量map<type,creator>来存储其所支持的层类型和解决器类型:

// Expansion of REGISTER_LAYER_CREATOR: one file-local registerer object is
// created per floating-point precision; their constructors run at static
// initialization time and insert the creator into the registry map.
static LayerRegisterer<float> g_creator_f_##type(#type, creator<float>);
static LayerRegisterer<double> g_creator_d_##type(#type, creator<double>)

VS编译生成caffe的静态库lib的时候,不会初始化上面的静态全局对象,也就是不会调用LayerRegisterer/LayerRegisterer的构造函数。其中LayerRegisterer的构造函数如下,其完成的功能是向静态容器(map<type,creator>)中添加键值对,键为string类型,如”PowerLayer”,creator是一个函数指针(函数是用来new一个PowerLayer类对象)。因为静态库不会初始化静态全局对象,因此使用编译的lib会报unknown layer type/unknown solver type 的错误,即在容器中找不到层类型或解决器类型。

// LayerRegisterer constructor: inserts a (type-name -> creator-function)
// pair into LayerRegistry's static map, so the factory can later build a
// layer object from its string type (e.g. "Power").
template <typename Dtype>
LayerRegisterer<Dtype>::LayerRegisterer(
    const string& type,
    shared_ptr<Layer<Dtype> > (*creator)(const LayerParameter&)) {
  // LOG(INFO) << "Registering layer type: " << type;
  LayerRegistry<Dtype>::AddCreator(type, creator);
}

使用caffe.lib出现的问题:
例如:

  1. unknown solver type: SGD
  2. unknown layer type: Data

等等类似的问题。

方法1:使用caffe.lib,但是加入下面的头文件:

caffe_reg.h

#ifndef CAFFE_REG_H#define CAFFE_REG_H//layer#include<caffe/layers/conv_layer.hpp> #include<caffe/layers/pooling_layer.hpp>#include<caffe/layers/lrn_layer.hpp>#include<caffe/layers/relu_layer.hpp>#include<caffe/layers/sigmoid_layer.hpp>#include<caffe/layers/softmax_layer.hpp>#include<caffe/layers/tanh_layer.hpp>#include<caffe/layers/python_layer.hpp>#include<caffe/layers/absval_layer.hpp>#include<caffe/layers/accuracy_layer.hpp>#include<caffe/layers/argmax_layer.hpp>#include<caffe/layers/batch_norm_layer.hpp>#include<caffe/layers/batch_reindex_layer.hpp>#include<caffe/layers/bias_layer.hpp>#include<caffe/layers/bnll_layer.hpp>#include<caffe/layers/concat_layer.hpp>#include<caffe/layers/contrastive_loss_layer.hpp>#include<caffe/layers/crop_layer.hpp>#include<caffe/layers/data_layer.hpp>#include<caffe/layers/deconv_layer.hpp>#include<caffe/layers/dropout_layer.hpp>#include<caffe/layers/dummy_data_layer.hpp>#include<caffe/layers/eltwise_layer.hpp>#include<caffe/layers/elu_layer.hpp>#include<caffe/layers/embed_layer.hpp>#include<caffe/layers/euclidean_loss_layer.hpp>#include<caffe/layers/exp_layer.hpp>#include<caffe/layers/filter_layer.hpp>#include<caffe/layers/flatten_layer.hpp>#include<caffe/layers/hdf5_data_layer.hpp>#include<caffe/layers/hdf5_output_layer.hpp>#include<caffe/layers/hinge_loss_layer.hpp>#include<caffe/layers/im2col_layer.hpp>#include<caffe/layers/image_data_layer.hpp>#include<caffe/layers/infogain_loss_layer.hpp>#include<caffe/layers/inner_product_layer.hpp>#include<caffe/layers/input_layer.hpp>#include<caffe/layers/log_layer.hpp>#include<caffe/layers/lstm_layer.hpp>#include<caffe/layers/memory_data_layer.hpp>#include<caffe/layers/multinomial_logistic_loss_layer.hpp>#include<caffe/layers/mvn_layer.hpp>#include<caffe/layers/parameter_layer.hpp>#include<caffe/layers/power_layer.hpp>#include<caffe/layers/prelu_layer.hpp>#include<caffe/layers/reduction_layer.hpp>#include<caffe/layers/reshape_layer.hpp>#include<caffe/layers/rnn_layer.hpp>#include<caffe/lay
ers/scale_layer.hpp>#include<caffe/layers/sigmoid_cross_entropy_loss_layer.hpp>#include<caffe/layers/silence_layer.hpp>#include<caffe/layers/slice_layer.hpp>#include<caffe/layers/softmax_loss_layer.hpp>#include<caffe/layers/split_layer.hpp>#include<caffe/layers/spp_layer.hpp>#include<caffe/layers/threshold_layer.hpp>#include<caffe/layers/tile_layer.hpp>#include<caffe/layers/window_data_layer.hpp>//solver#include<caffe/sgd_solvers.hpp>namespace caffe{    // 59 layers    extern INSTANTIATE_CLASS(ConvolutionLayer);    extern INSTANTIATE_CLASS(PoolingLayer);    extern INSTANTIATE_CLASS(LRNLayer);    extern INSTANTIATE_CLASS(ReLULayer);    extern INSTANTIATE_CLASS(SigmoidLayer);    extern INSTANTIATE_CLASS(SoftmaxLayer);    extern INSTANTIATE_CLASS(TanHLayer);    extern INSTANTIATE_CLASS(PythonLayer);    extern INSTANTIATE_CLASS(AbsValLayer);    extern INSTANTIATE_CLASS(AccuracyLayer);    extern INSTANTIATE_CLASS(ArgMaxLayer);    extern INSTANTIATE_CLASS(BatchNormLayer);    extern INSTANTIATE_CLASS(BatchReindexLayer);    extern INSTANTIATE_CLASS(BiasLayer);    extern INSTANTIATE_CLASS(BNLLLayer);    extern INSTANTIATE_CLASS(ConcatLayer);    extern INSTANTIATE_CLASS(ContrastiveLossLayer);    extern INSTANTIATE_CLASS(CropLayer);    extern INSTANTIATE_CLASS(DataLayer);    extern INSTANTIATE_CLASS(DeconvolutionLayer);    extern INSTANTIATE_CLASS(DropoutLayer);    extern INSTANTIATE_CLASS(DummyDataLayer);    extern INSTANTIATE_CLASS(EltwiseLayer);    extern INSTANTIATE_CLASS(ELULayer);    extern INSTANTIATE_CLASS(EmbedLayer);    extern INSTANTIATE_CLASS(EuclideanLossLayer);    extern INSTANTIATE_CLASS(ExpLayer);    extern INSTANTIATE_CLASS(FilterLayer);    extern INSTANTIATE_CLASS(FlattenLayer);    extern INSTANTIATE_CLASS(HDF5DataLayer);    extern INSTANTIATE_CLASS(HDF5OutputLayer);    extern INSTANTIATE_CLASS(HingeLossLayer);    extern INSTANTIATE_CLASS(Im2colLayer);    extern INSTANTIATE_CLASS(ImageDataLayer);    extern INSTANTIATE_CLASS(InfogainLossLayer);    extern 
INSTANTIATE_CLASS(InnerProductLayer);    extern INSTANTIATE_CLASS(InputLayer);    extern INSTANTIATE_CLASS(LogLayer);    extern INSTANTIATE_CLASS(LSTMLayer);    extern INSTANTIATE_CLASS(LSTMUnitLayer);    extern INSTANTIATE_CLASS(MemoryDataLayer);    extern INSTANTIATE_CLASS(MultinomialLogisticLossLayer);    extern INSTANTIATE_CLASS(MVNLayer);    extern INSTANTIATE_CLASS(ParameterLayer);    extern INSTANTIATE_CLASS(PowerLayer);    extern INSTANTIATE_CLASS(PReLULayer);    extern INSTANTIATE_CLASS(ReductionLayer);    extern INSTANTIATE_CLASS(ReshapeLayer);    extern INSTANTIATE_CLASS(RNNLayer);    extern INSTANTIATE_CLASS(ScaleLayer);    extern INSTANTIATE_CLASS(SigmoidCrossEntropyLossLayer);    extern INSTANTIATE_CLASS(SilenceLayer);    extern INSTANTIATE_CLASS(SliceLayer);    extern INSTANTIATE_CLASS(SoftmaxWithLossLayer);    extern INSTANTIATE_CLASS(SplitLayer);    extern INSTANTIATE_CLASS(SPPLayer);    extern INSTANTIATE_CLASS(ThresholdLayer);    extern INSTANTIATE_CLASS(TileLayer);    extern INSTANTIATE_CLASS(WindowDataLayer);    // 6 sovlers    extern INSTANTIATE_CLASS(AdaDeltaSolver);    extern INSTANTIATE_CLASS(AdaGradSolver);    extern INSTANTIATE_CLASS(AdamSolver);    extern INSTANTIATE_CLASS(NesterovSolver);    extern INSTANTIATE_CLASS(RMSPropSolver);    extern INSTANTIATE_CLASS(SGDSolver);}#endif

该方法利用主程序引用即初始化的原理。
很奇怪的是,通过上述的方法,我们发现除了Data和Parameter两个层不能注册外,其他的57个层都能注册。如下:
这里写图片描述

将这两个的extern声明替换为注册:

// Register the two layers whose extern declarations alone did not trigger
// registration; each expands to a static registerer object in this TU.
REGISTER_LAYER_CLASS(Data);
REGISTER_LAYER_CLASS(Parameter);

这样调用的话会触发源文件中全局静态对象的实例化,因为静态变量的作用域是本源文件,因此不会触发变量的重定义问题。但是通不过caffe中layer_factory.cpp中的AddCreator函数中的:

// Duplicate-registration guard inside LayerRegistry::AddCreator: aborts if
// the type name is already present in the registry map.
CHECK_EQ(registry.count(type), 0) << "Layer type " << type
                                  << " already registered.";

因此,我们屏蔽掉这条语句,然后将后面的语句修改为:

 if (registry.count(type) == 0)  {      registry[type] = creator;;  }  else  {      return;  }

即如果没有注册就注册,注册过了就返回。
然后重新编译caffe即可。

注释:

全局变量(外部变量)的说明之前再冠以static 就构成了静态的全局变量。全局变量本身就是静态存储方式, 静态全局变量当然也是静态存储方式。 这两者在存储方式上并无不同。这两者的区别在于非静态全局变量的作用域是整个源程序, 当一个源程序由多个源文件组成时,非静态的全局变量在各个源文件中都是有效的。 而静态全局变量则限制了其作用域, 即只在定义该变量的源文件内有效, 在同一源程序的其它源文件中不能使用它。由于静态全局变量的作用域局限于一个源文件内,只能为该源文件内的函数公用,因此可以避免在其它源文件中引起错误。

完整的caffe_reg.h如下

#pragma once#ifndef CAFFE_REG_H#define CAFFE_REG_H//layer#include<caffe/layers/conv_layer.hpp> #include<caffe/layers/pooling_layer.hpp>#include<caffe/layers/lrn_layer.hpp>#include<caffe/layers/relu_layer.hpp>#include<caffe/layers/sigmoid_layer.hpp>#include<caffe/layers/softmax_layer.hpp>#include<caffe/layers/tanh_layer.hpp>#include<caffe/layers/python_layer.hpp>#include<caffe/layers/absval_layer.hpp>#include<caffe/layers/accuracy_layer.hpp>#include<caffe/layers/argmax_layer.hpp>#include<caffe/layers/batch_norm_layer.hpp>#include<caffe/layers/batch_reindex_layer.hpp>#include<caffe/layers/bias_layer.hpp>#include<caffe/layers/bnll_layer.hpp>#include<caffe/layers/concat_layer.hpp>#include<caffe/layers/contrastive_loss_layer.hpp>#include<caffe/layers/crop_layer.hpp>#include<caffe/layers/data_layer.hpp>#include<caffe/layers/deconv_layer.hpp>#include<caffe/layers/dropout_layer.hpp>#include<caffe/layers/dummy_data_layer.hpp>#include<caffe/layers/eltwise_layer.hpp>#include<caffe/layers/elu_layer.hpp>#include<caffe/layers/embed_layer.hpp>#include<caffe/layers/euclidean_loss_layer.hpp>#include<caffe/layers/exp_layer.hpp>#include<caffe/layers/filter_layer.hpp>#include<caffe/layers/flatten_layer.hpp>#include<caffe/layers/hdf5_data_layer.hpp>#include<caffe/layers/hdf5_output_layer.hpp>#include<caffe/layers/hinge_loss_layer.hpp>#include<caffe/layers/im2col_layer.hpp>#include<caffe/layers/image_data_layer.hpp>#include<caffe/layers/infogain_loss_layer.hpp>#include<caffe/layers/inner_product_layer.hpp>#include<caffe/layers/input_layer.hpp>#include<caffe/layers/log_layer.hpp>#include<caffe/layers/lstm_layer.hpp>#include<caffe/layers/memory_data_layer.hpp>#include<caffe/layers/multinomial_logistic_loss_layer.hpp>#include<caffe/layers/mvn_layer.hpp>#include<caffe/layers/parameter_layer.hpp>#include<caffe/layers/power_layer.hpp>#include<caffe/layers/prelu_layer.hpp>#include<caffe/layers/reduction_layer.hpp>#include<caffe/layers/reshape_layer.hpp>#include<caffe/layers/rnn_layer.hpp>#inclu
de<caffe/layers/scale_layer.hpp>#include<caffe/layers/sigmoid_cross_entropy_loss_layer.hpp>#include<caffe/layers/silence_layer.hpp>#include<caffe/layers/slice_layer.hpp>#include<caffe/layers/softmax_loss_layer.hpp>#include<caffe/layers/split_layer.hpp>#include<caffe/layers/spp_layer.hpp>#include<caffe/layers/threshold_layer.hpp>#include<caffe/layers/tile_layer.hpp>#include<caffe/layers/window_data_layer.hpp>//#include<caffe>//solver#include<caffe/sgd_solvers.hpp>namespace caffe{    // 2 layer, 很奇怪,其他57个层可以通过extern,但这两个需要添加注册才可以。    //extern INSTANTIATE_CLASS(DataLayer);    //extern INSTANTIATE_CLASS(ParameterLayer);    REGISTER_LAYER_CLASS(Data);    REGISTER_LAYER_CLASS(Parameter);    // 57 layers    extern INSTANTIATE_CLASS(ConvolutionLayer);    extern INSTANTIATE_CLASS(PoolingLayer);    extern INSTANTIATE_CLASS(LRNLayer);    extern INSTANTIATE_CLASS(ReLULayer);    extern INSTANTIATE_CLASS(SigmoidLayer);    extern INSTANTIATE_CLASS(SoftmaxLayer);    extern INSTANTIATE_CLASS(TanHLayer);    extern INSTANTIATE_CLASS(PythonLayer);    extern INSTANTIATE_CLASS(AbsValLayer);    extern INSTANTIATE_CLASS(AccuracyLayer);    extern INSTANTIATE_CLASS(ArgMaxLayer);    extern INSTANTIATE_CLASS(BatchNormLayer);    extern INSTANTIATE_CLASS(BatchReindexLayer);    extern INSTANTIATE_CLASS(BiasLayer);    extern INSTANTIATE_CLASS(BNLLLayer);    extern INSTANTIATE_CLASS(ConcatLayer);    extern INSTANTIATE_CLASS(ContrastiveLossLayer);    extern INSTANTIATE_CLASS(CropLayer);    extern INSTANTIATE_CLASS(DeconvolutionLayer);    extern INSTANTIATE_CLASS(DropoutLayer);    extern INSTANTIATE_CLASS(DummyDataLayer);    extern INSTANTIATE_CLASS(EltwiseLayer);    extern INSTANTIATE_CLASS(ELULayer);    extern INSTANTIATE_CLASS(EmbedLayer);    extern INSTANTIATE_CLASS(EuclideanLossLayer);    extern INSTANTIATE_CLASS(ExpLayer);    extern INSTANTIATE_CLASS(FilterLayer);    extern INSTANTIATE_CLASS(FlattenLayer);    extern INSTANTIATE_CLASS(HDF5DataLayer);    extern 
INSTANTIATE_CLASS(HDF5OutputLayer);    extern INSTANTIATE_CLASS(HingeLossLayer);    extern INSTANTIATE_CLASS(Im2colLayer);    extern INSTANTIATE_CLASS(ImageDataLayer);    extern INSTANTIATE_CLASS(InfogainLossLayer);    extern INSTANTIATE_CLASS(InnerProductLayer);    extern INSTANTIATE_CLASS(InputLayer);    extern INSTANTIATE_CLASS(LogLayer);    extern INSTANTIATE_CLASS(LSTMLayer);    extern INSTANTIATE_CLASS(LSTMUnitLayer);    extern INSTANTIATE_CLASS(MemoryDataLayer);    extern INSTANTIATE_CLASS(MultinomialLogisticLossLayer);    extern INSTANTIATE_CLASS(MVNLayer);    extern INSTANTIATE_CLASS(PowerLayer);    extern INSTANTIATE_CLASS(PReLULayer);    extern INSTANTIATE_CLASS(ReductionLayer);    extern INSTANTIATE_CLASS(ReshapeLayer);    extern INSTANTIATE_CLASS(RNNLayer);    extern INSTANTIATE_CLASS(ScaleLayer);    extern INSTANTIATE_CLASS(SigmoidCrossEntropyLossLayer);    extern INSTANTIATE_CLASS(SilenceLayer);    extern INSTANTIATE_CLASS(SliceLayer);    extern INSTANTIATE_CLASS(SoftmaxWithLossLayer);    extern INSTANTIATE_CLASS(SplitLayer);    extern INSTANTIATE_CLASS(SPPLayer);    extern INSTANTIATE_CLASS(ThresholdLayer);    extern INSTANTIATE_CLASS(TileLayer);    extern INSTANTIATE_CLASS(WindowDataLayer);    // 6 sovlers    extern INSTANTIATE_CLASS(AdaDeltaSolver);    extern INSTANTIATE_CLASS(AdaGradSolver);    extern INSTANTIATE_CLASS(AdamSolver);    extern INSTANTIATE_CLASS(NesterovSolver);    extern INSTANTIATE_CLASS(RMSPropSolver);    extern INSTANTIATE_CLASS(SGDSolver);}#endif

测试代码如下

#include "stdafx.h"
#define CPU_ONLY
#include "caffe_reg.h"
#include <vector>
#include <iostream>
#include <cstdio>
#include <caffe/caffe.hpp>
#include <caffe/solver_factory.hpp>
using namespace caffe;
using namespace std;

// Smoke test: print every registered layer type, run one solver step and
// dump (iteration, loss, accuracy) to result.txt.
int main(void) {
  // 打印出所有注册的层
  vector<string> typeList = LayerRegistry<float>::LayerTypeList();
  cout << "层数: " << typeList.size() << endl;
  for (size_t i = 0; i < typeList.size(); i++) {
    cout << typeList[i] << " ";
  }
  cout << endl;

  SolverParameter solver_param;
  string solver_file = "lenet_solver1.prototxt";
  ReadSolverParamsFromTextFileOrDie(solver_file, &solver_param);
  // Solver<float> is abstract, so we hold a pointer to the concrete solver.
  // FIX: own it with shared_ptr (the original leaked the raw pointer).
  boost::shared_ptr<Solver<float> > solver(
      SolverRegistry<float>::CreateSolver(solver_param));

  const int maxIter = 1;
  float loss[maxIter];
  float accuracy[maxIter];
  int step[maxIter];
  for (int i = 0; i < maxIter; i++) {
    solver->Step(1);
    step[i] = solver->iter();
    loss[i] = *(solver->net()->blob_by_name("loss")->mutable_cpu_data());
    accuracy[i] =
        *(solver->test_nets()[0]->blob_by_name("accuracy")->mutable_cpu_data());
  }

  // FIX: check fopen result and close the file (original leaked the handle).
  FILE* fp = fopen("result.txt", "w");
  if (fp != NULL) {
    for (int i = 0; i < maxIter; i++) {
      fprintf(fp, "%d %.4f %.4f\n", step[i], loss[i], accuracy[i]);
    }
    fclose(fp);
  }
  return 0;
}

方法2:使用caffe.dll:

编译dll库,主要参考文章:<编译caffe的dll库>
我们跟随文献6的方法开始。

在下面每个将要修改的.h头文件中都要加入宏定义:

// Export/import switch: define BUILD_DLL when compiling the DLL itself so
// annotated symbols are exported; client code (without BUILD_DLL) imports.
// FIX: removed the stray trailing comma after #endif.
#ifdef BUILD_DLL
#define OS_API __declspec(dllexport)
#else
#define OS_API __declspec(dllimport)
#endif

blob.hpp

class OS_API Blob

net.hpp

class OS_API Net

caffe.pb.h

class OS_API BlobShapeclass OS_API BlobProtoclass OS_API BlobProtoVectorclass OS_API SolverParameterclass OS_API LayerParameterclass OS_API NetParameterclass OS_API FillerParameter

common.hpp

OS_API void GlobalInit(int* pargc, char*** pargv);class OS_API Caffeclass OS_API RNG

io.hpp

// io.hpp symbols to annotate with OS_API (one declaration per line;
// FIX: normalized the inconsistent "void OS_API" ordering on the last entry):
OS_API bool ReadProtoFromTextFile(const char* filename, Message* proto);
OS_API bool ReadProtoFromBinaryFile(const char* filename, Message* proto);
OS_API void WriteProtoToBinaryFile
OS_API bool ReadImageToDatum
OS_API bool DecodeDatumNative(Datum* datum);
OS_API void WriteProtoToTextFile

db.hpp

class OS_API DBOS_API DB* GetDB(DataParameter::DB backend);OS_API DB* GetDB(const string& backend);

benchmark.hpp

class OS_API Timer 

upgrade_proto.hpp

// upgrade_proto.hpp symbols to annotate with OS_API
// (FIX: "OS_API boolbool UpgradeSolverAsNeeded" was a typo — a duplicated
// "bool"; it should be a single declaration):
OS_API void ReadSolverParamsFromTextFileOrDie
OS_API bool NetNeedsUpgrade(const NetParameter& net_param);
OS_API bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param);
OS_API bool SolverNeedsTypeUpgrade(const SolverParameter& solver_param);
OS_API bool UpgradeSolverAsNeeded(const string& param_file, SolverParameter* param);

signal_handler.h

class OS_API SignalHandler

solver.hpp

class OS_API Solver

parallel.hpp

class OS_API P2PSync

layer.hpp

class OS_API Layer

math_functions.hpp

// math_functions.hpp symbols to annotate with OS_API (declarations were
// fused together in the original — one per line):
OS_API unsigned int caffe_rng_rand();
OS_API Dtype caffe_cpu_dot(const int n, const Dtype* x, const Dtype* y);
OS_API void caffe_rng_gaussian
OS_API void caffe_rng_bernoulli(const int n, const Dtype p, int* r);
OS_API void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r);
OS_API void caffe_copy(const int N, const Dtype *X, Dtype *Y);
OS_API void caffe_set(const int N, const Dtype alpha, Dtype *X);
OS_API void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r);

syncedmem.hpp

class OS_API SyncedMemory

solver_factory.hpp

class OS_API SolverRegistry

layer_factory.hpp

class OS_API LayerRegisterer

像文献[6]中所说的那样:“因为这个头文件是根据src\caffe\proto\caffe.proto自动生成的,所以编译的时候还把这个文件改了名字,否则就把修改后的caffe.pb.h又覆盖了”。

我们通过下面的操作来完成上面模糊的说明:

这里写图片描述

上图中没有步骤2,步骤2就是修改:

E:\caffe-windows\scripts\build\include\caffe\proto\caffe.pb.h

matlab训练caffe模型示例代码:

clc;
clearvars;
close all;

% Add the caffe MATLAB class directory to the path.
if exist('../+caffe', 'dir')
  addpath('..');
else
  error('Please run this demo from caffe/matlab/demo');
end
caffe.reset_all;
% caffe.set_mode_cpu();
% solver = caffe.Solver('lenet_solver1.prototxt');
% solver.solve();

format long  % raise display precision; caffe losses differ past several decimals
addpath('..')
caffe.reset_all  % reset networks, otherwise loading two nets hangs
solver = caffe.Solver('lenet_solver1.prototxt');  % load the network
loss = [];      % holds two consecutive loss values
accuracy = [];  % holds two consecutive accuracy values
hold on         % needed for incremental plotting
accuracy_init = 0;
loss_init = 0;
for i = 1:10000
  solver.step(1);  % after each iteration, read loss and accuracy
  iter = solver.iter();
  loss = solver.net.blobs('loss').get_data();               % training-set loss
  accuracy = solver.test_nets.blobs('accuracy').get_data(); % validation accuracy
  disp(accuracy);
  % Plot the loss curve one segment at a time.
  x = [i - 1, i];
  y = [loss_init loss];
  plot(x, y, 'r-')
  drawnow
  loss_init = loss;
end

VS2015训练caffe模型的代码

#include "stdafx.h"
#define CPU_ONLY
#include <vector>
#include <iostream>
#include <cstdio>
#include <caffe/caffe.hpp>
using namespace caffe;
using namespace std;

// VS2015 training driver: run one solver step and write
// (iteration, loss, accuracy) to result.txt.
int main(void) {
  SolverParameter solver_param;
  string solver_file = "lenet_solver1.prototxt";
  ReadSolverParamsFromTextFileOrDie(solver_file, &solver_param);
  // FIX: own the solver with shared_ptr (the original leaked the raw pointer).
  boost::shared_ptr<Solver<float> > solver(
      SolverRegistry<float>::CreateSolver(solver_param));

  const int maxIter = 1;
  float loss[maxIter];
  float accuracy[maxIter];
  int step[maxIter];
  for (int i = 0; i < maxIter; i++) {
    solver->Step(1);
    step[i] = solver->iter();
    loss[i] = *(solver->net()->blob_by_name("loss")->mutable_cpu_data());
    accuracy[i] =
        *(solver->test_nets()[0]->blob_by_name("accuracy")->mutable_cpu_data());
  }

  // FIX: check fopen result and close the file (original leaked the handle).
  FILE* fp = fopen("result.txt", "w");
  if (fp != NULL) {
    for (int i = 0; i < maxIter; i++) {
      fprintf(fp, "%d %.4f %.4f\n", step[i], loss[i], accuracy[i]);
    }
    fclose(fp);
  }
  return 0;
}


caffe共实现了59个层和6个求解器(solver),如下:

层:Convolution, Pooling, LRN, ReLU, Sigmoid, Softmax, TanH, Python, AbsVal, Accuracy, ArgMax, BatchNorm, BatchReindex, Bias, BNLL, Concat, ContrastiveLoss, Crop, Data, Deconvolution, Dropout, DummyData, Eltwise, ELU, Embed, EuclideanLoss, Exp, Filter, Flatten, HDF5Data, HDF5Output, HingeLoss, Im2col, ImageData, InfogainLoss, InnerProduct, Input, Log, LSTM, LSTMUnit, MemoryData, MultinomialLogisticLoss, MVN, Parameter, Power, PReLU, Reduction, Reshape, RNN, Scale, SigmoidCrossEntropyLoss, Silence, Slice, SoftmaxWithLoss, Split, SPP, Threshold, Tile, WindowData。求解器:AdaDeltaSolver, AdaGradSolver, AdamSolver, NesterovSolver, RMSPropSolver, SGDSolver。

参考文献:

1.https://github.com/ih4cku/blog/issues/93
2.http://blog.luoyetx.com/2016/02/reading-caffe-3/ [Caffe 源码阅读 Layer 加载机制]
3.https://github.com/BVLC/caffe/issues/4010 [Solvers not registered during building? #4010]
4.https://github.com/BVLC/caffe/pull/4739 [Hide implementation of LayerRegistry::CreatorRegistry and SolverRegistry::CreatorRegistry singletons #4739]
5.http://www.cnblogs.com/JimmyTY/p/5856217.html [Linux C/C++ 链接选项之静态库–whole-archive,–no-whole-archive和–start-group, –end-group]
6. http://blog.csdn.net/ccemmawatson/article/details/51539182 [编译caffe的dll库]
7.http://blog.csdn.net/birdwcp/article/details/53580068 [ Check failed: registry.count(t ype) == 1 (0 vs. 1) Unknown layer type: Input (known types: Input )]

0 0
原创粉丝点击