【Python】【Caffe】一、生成prototxt文件《python调用caffe模块》
来源:互联网 发布:sql 获取exec 返回值 编辑:程序博客网 时间:2024/05/19 20:38
GitHub代码地址:https://github.com/HandsomeHans/Use-Python-to-call-Caffe-module
前言
写这一系列博文前真的想了好久,有种无从下手的感觉。还是功力太浅,越是这样越要硬着头皮写。加油!
我先将各个函数单独放出来,最后再放出完整代码。各个函数中单独用到的模块,我在函数中单独加载,这样方便将代码独立出来单独运行。
import caffe是全局通用模块,我就不在每个函数中单独加载了。
一、加载caffe模块
为了以后方便使用,把caffe模块放到python默认路径下,这样在任意目录下就都能加载caffe模块了。
caffe编译通过后运行:
make pycaffe
sudo cp -r python/caffe/ /usr/local/lib/python/dist-packages # 有些朋友路径是site-packages,这个因人而异。
这个时候运行python,import caffe,会提示找不到caffe的动态库。
可以将 $CAFFE_ROOT/.build_release/lib/ 加到环境变量中去,
也可以将该动态库复制到/usr/lib/ 或者/usr/local/lib 目录下。
此时,应该就可以在任意目录下运行python,import caffe了。
二、生成训练和测试prototxt文件
def lenet(lmdb, batch_size, include_acc=False):
    """Build a LeNet network spec and return it as a NetParameter proto.

    lmdb        -- path to the LMDB data source for the Data layer
    batch_size  -- mini-batch size for the Data layer
    include_acc -- when True, append an Accuracy layer (for the test net)
    """
    from caffe import layers, params
    net = caffe.NetSpec()
    # ntop=2: the Data layer emits two blobs, data and label.
    net.data, net.label = layers.Data(
        batch_size=batch_size,
        backend=params.Data.LMDB,
        source=lmdb,
        transform_param=dict(scale=1. / 255),
        ntop=2)
    net.conv1 = layers.Convolution(
        net.data, kernel_size=5, num_output=20,
        weight_filler=dict(type='xavier'))
    net.pool1 = layers.Pooling(
        net.conv1, kernel_size=2, stride=2, pool=params.Pooling.MAX)
    net.conv2 = layers.Convolution(
        net.pool1, kernel_size=5, num_output=50,
        weight_filler=dict(type='xavier'))
    net.pool2 = layers.Pooling(
        net.conv2, kernel_size=2, stride=2, pool=params.Pooling.MAX)
    net.ip1 = layers.InnerProduct(
        net.pool2, num_output=500, weight_filler=dict(type='xavier'))
    net.relu1 = layers.ReLU(net.ip1, in_place=True)
    net.ip2 = layers.InnerProduct(
        net.relu1, num_output=10, weight_filler=dict(type='xavier'))
    net.loss = layers.SoftmaxWithLoss(net.ip2, net.label)
    if include_acc:
        # The test net needs a layer that reports accuracy.
        net.acc = layers.Accuracy(net.ip2, net.label)
    # Note: to_proto() takes no arguments here (NetSpec method).
    return net.to_proto()


def write_lenet():
    """Write the train and test prototxt files under ./doc/."""
    with open('./doc/train_lenet.prototxt', 'w') as f:
        f.write(str(lenet('./doc/mnist_train_lmdb', 64)))
    with open('./doc/test_lenet.prototxt', 'w') as f:
        f.write(str(lenet('./doc/mnist_test_lmdb', 100, True)))
三、生成deploy文件
这里我放出生成网络结构文件的第二种方法,虽然我并不推荐使用这个方法。其实deploy直接拿上面生成好的文件改就行,很简单。
def deploy():
    """Build the deploy-time LeNet graph (no data layer, Softmax output).

    Uses the module-level ``to_proto`` helper instead of NetSpec; layer
    names in the emitted prototxt are auto-generated in this variant.
    """
    from caffe import layers, params
    from caffe import to_proto
    # Deploy nets have no Data layer; the first conv binds to the
    # externally supplied 'data' blob.
    conv1 = layers.Convolution(
        bottom='data', kernel_size=5, num_output=20,
        weight_filler=dict(type='xavier'))
    pool1 = layers.Pooling(
        conv1, kernel_size=2, stride=2, pool=params.Pooling.MAX)
    conv2 = layers.Convolution(
        pool1, kernel_size=5, num_output=50,
        weight_filler=dict(type='xavier'))
    pool2 = layers.Pooling(
        conv2, kernel_size=2, stride=2, pool=params.Pooling.MAX)
    ip1 = layers.InnerProduct(
        pool2, num_output=500, weight_filler=dict(type='xavier'))
    relu1 = layers.ReLU(ip1, in_place=True)
    ip2 = layers.InnerProduct(
        relu1, num_output=10, weight_filler=dict(type='xavier'))
    # No loss at deploy time; emit class probabilities instead.
    prob = layers.Softmax(ip2)
    # Unlike NetSpec.to_proto(), this helper takes the tops as arguments.
    return to_proto(prob)


def write_deploy():
    """Write the deploy prototxt, prepending the input blob declaration."""
    with open('doc/deploy_lenet.prototxt', 'w') as f:
        f.write('name: "Lenet"\n')
        f.write('input: "data"\n')
        f.write('input_dim: 1\n')
        f.write('input_dim: 3\n')
        f.write('input_dim: 28\n')
        f.write('input_dim: 28\n')
        f.write(str(deploy()))
四、生成solver文件
方法一是用字典生成:
def solver_dict():
    """Write a Caffe solver prototxt by formatting a plain key/value dict.

    Values are kept as pre-formatted strings (string-typed solver fields
    already carry their own double quotes) and emitted one `key: value`
    line each to doc/solver_lenet.prototxt.

    Raises:
        TypeError: if any value in the parameter dict is not a string.
    """
    solver_file = 'doc/solver_lenet.prototxt'
    sp = {}
    sp['train_net'] = '"doc/train_lenet.prototxt"'
    sp['test_net'] = '"doc/test_lenet.prototxt"'
    sp['test_iter'] = '100'
    sp['test_interval'] = '500'
    sp['display'] = '100'
    sp['max_iter'] = '10000'
    sp['base_lr'] = '0.01'
    sp['lr_policy'] = '"inv"'
    sp['gamma'] = '0.0001'
    sp['power'] = '0.75'
    sp['momentum'] = '0.9'
    sp['weight_decay'] = '0.0005'
    sp['snapshot'] = '5000'
    sp['snapshot_prefix'] = '"models/lenet"'
    sp['solver_mode'] = 'GPU'
    sp['solver_type'] = 'SGD'
    sp['device_id'] = '0'
    with open(solver_file, 'w') as f:
        for key, value in sp.items():
            # isinstance is the idiomatic type check (was `type(value) is str`).
            if not isinstance(value, str):
                raise TypeError('All solver parameters must be string')
            f.write('%s: %s\n' % (key, value))
方法二是调用caffe模块生成,这种方法生成的文件内,小数部分会有很小的损失。处女座强迫症犯了!
def solver_caffe():
    """Write the solver prototxt via the caffe_pb2.SolverParameter message.

    The protobuf text serialization may print floats with tiny rounding
    differences compared to the hand-written values.
    """
    from caffe.proto import caffe_pb2
    solver_file = 'doc/solver_lenet.prototxt'
    solver = caffe_pb2.SolverParameter()
    # Nets: train_net is a scalar field; test_net/test_iter are repeated.
    solver.train_net = 'doc/train_lenet.prototxt'
    solver.test_net.append('doc/test_lenet.prototxt')
    solver.test_iter.append(100)
    solver.test_interval = 500
    # Learning-rate schedule.
    solver.base_lr = 0.01
    solver.lr_policy = "inv"
    solver.gamma = 0.0001
    solver.power = 0.75
    # SGD hyper-parameters.
    solver.momentum = 0.9
    solver.weight_decay = 0.0005
    solver.type = "SGD"
    # Logging / snapshotting.
    solver.display = 100
    solver.max_iter = 10000
    solver.snapshot = 5000
    solver.snapshot_prefix = "models/lenet"
    solver.solver_mode = caffe_pb2.SolverParameter.GPU
    with open(solver_file, 'w') as f:
        f.write(str(solver))
五、完整代码:
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Generate LeNet train/test/deploy prototxt and solver files via pycaffe.

Created on Sat Jul 29 11:16:29 2017

@author: hans
"""
import caffe


def lenet(lmdb, batch_size, include_acc=False):
    """Return a LeNet NetParameter proto for the given LMDB source."""
    from caffe import layers as L
    from caffe import params as P
    spec = caffe.NetSpec()
    # ntop=2: the Data layer produces the data and label blobs.
    spec.data, spec.label = L.Data(
        batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
        transform_param=dict(scale=1. / 255), ntop=2)
    spec.conv1 = L.Convolution(
        spec.data, kernel_size=5, num_output=20,
        weight_filler=dict(type='xavier'))
    spec.pool1 = L.Pooling(
        spec.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    spec.conv2 = L.Convolution(
        spec.pool1, kernel_size=5, num_output=50,
        weight_filler=dict(type='xavier'))
    spec.pool2 = L.Pooling(
        spec.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    spec.ip1 = L.InnerProduct(
        spec.pool2, num_output=500, weight_filler=dict(type='xavier'))
    spec.relu1 = L.ReLU(spec.ip1, in_place=True)
    spec.ip2 = L.InnerProduct(
        spec.relu1, num_output=10, weight_filler=dict(type='xavier'))
    spec.loss = L.SoftmaxWithLoss(spec.ip2, spec.label)
    if include_acc:
        # Only the test net carries an accuracy layer.
        spec.acc = L.Accuracy(spec.ip2, spec.label)
    return spec.to_proto()


def write_lenet():
    """Emit the train and test network prototxt files."""
    with open('./doc/train_lenet.prototxt', 'w') as f:
        f.write(str(lenet('./doc/mnist_train_lmdb', 64)))
    with open('./doc/test_lenet.prototxt', 'w') as f:
        f.write(str(lenet('./doc/mnist_test_lmdb', 100, True)))


def deploy():
    """Return the deploy-time graph (no Data layer, Softmax output)."""
    from caffe import layers as L
    from caffe import params as P
    from caffe import to_proto
    # The first conv binds to the external 'data' blob.
    conv1 = L.Convolution(
        bottom='data', kernel_size=5, num_output=20,
        weight_filler=dict(type='xavier'))
    pool1 = L.Pooling(conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    conv2 = L.Convolution(
        pool1, kernel_size=5, num_output=50,
        weight_filler=dict(type='xavier'))
    pool2 = L.Pooling(conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    ip1 = L.InnerProduct(
        pool2, num_output=500, weight_filler=dict(type='xavier'))
    relu1 = L.ReLU(ip1, in_place=True)
    ip2 = L.InnerProduct(
        relu1, num_output=10, weight_filler=dict(type='xavier'))
    prob = L.Softmax(ip2)
    # Module-level to_proto() takes the output tops as arguments.
    return to_proto(prob)


def write_deploy():
    """Emit the deploy prototxt with a hand-written input declaration."""
    with open('doc/deploy_lenet.prototxt', 'w') as f:
        f.write('name: "Lenet"\n')
        f.write('input: "data"\n')
        f.write('input_dim: 1\n')
        f.write('input_dim: 3\n')
        f.write('input_dim: 28\n')
        f.write('input_dim: 28\n')
        f.write(str(deploy()))


def solver_dict():
    """Write the solver prototxt from a plain string-valued dict."""
    solver_file = 'doc/solver_lenet.prototxt'
    sp = {}
    sp['train_net'] = '"doc/train_lenet.prototxt"'
    sp['test_net'] = '"doc/test_lenet.prototxt"'
    sp['test_iter'] = '100'
    sp['test_interval'] = '500'
    sp['display'] = '100'
    sp['max_iter'] = '10000'
    sp['base_lr'] = '0.01'
    sp['lr_policy'] = '"inv"'
    sp['gamma'] = '0.0001'
    sp['power'] = '0.75'
    sp['momentum'] = '0.9'
    sp['weight_decay'] = '0.0005'
    sp['snapshot'] = '5000'
    sp['snapshot_prefix'] = '"models/lenet"'
    sp['solver_mode'] = 'GPU'
    sp['solver_type'] = 'SGD'
    sp['device_id'] = '0'
    with open(solver_file, 'w') as f:
        for key, value in sp.items():
            if not (type(value) is str):
                raise TypeError('All solver parameters must be string')
            f.write('%s: %s\n' % (key, value))


def solver_caffe():
    """Write the solver prototxt through caffe_pb2.SolverParameter."""
    from caffe.proto import caffe_pb2
    s = caffe_pb2.SolverParameter()
    solver_file = 'doc/solver_lenet.prototxt'
    s.train_net = 'doc/train_lenet.prototxt'
    s.test_net.append('doc/test_lenet.prototxt')
    s.test_interval = 500
    s.test_iter.append(100)
    s.display = 100
    s.max_iter = 10000
    s.base_lr = 0.01
    s.lr_policy = "inv"
    s.gamma = 0.0001
    s.power = 0.75
    s.momentum = 0.9
    s.weight_decay = 0.0005
    s.snapshot = 5000
    s.snapshot_prefix = "models/lenet"
    s.type = "SGD"
    s.solver_mode = caffe_pb2.SolverParameter.GPU
    with open(solver_file, 'w') as f:
        f.write(str(s))


def train():
    """Run SGD training on GPU 0 using the generated solver file."""
    caffe.set_device(0)
    caffe.set_mode_gpu()
    solver = caffe.SGDSolver('doc/solver_lenet.prototxt')
    solver.solve()


if __name__ == '__main__':
    write_lenet()
#    write_deploy()
#    solver_dict()
#    solver_caffe()
#    train()
阅读全文
0 0
- 【Python】【Caffe】一、生成prototxt文件《python调用caffe模块》
- 利用caffe的Python接口生成prototxt文件
- 利用python脚本生成caffe的prototxt文件
- 利用caffe的Python接口生成prototxt文件
- 【Python】【Caffe】三、生成.npy均值文件《python调用caffe模块》
- caffe用python产生prototxt文件
- caffe源码学习——用python定义网络时,源代码生成prototxt文件的原理
- 在python中编写caffe的prototxt文件
- 【Python】【Caffe】二、训练输出可视化《python调用caffe模块》
- 【Python】【Caffe】四、classification检测模型《python调用caffe模块》
- Creating caffe net prototxt files in python!
- caffe solver.prototxt文件
- 【caffe】caffe的python接口学习:生成solver文件
- python调用caffe
- python生成caffe binary proto文件
- caffe生成lenet-5的deploy.prototxt文件
- caffe学习笔记-模型代码生成.prototxt文件
- caffe中 solver.prototxt文件
- [DFS] Codeforces 510B:Fox And Two Dot
- 寻找最大数(三)
- 自定义可折叠按钮
- Oil Deposits
- CF 833B The Bakery(dp+线段树)
- 【Python】【Caffe】一、生成prototxt文件《python调用caffe模块》
- 弱/水!!题
- 51nod 1057 N的阶乘
- navicat报错
- 字符串的拷贝
- 闭包问题的理解及总结
- 关于路径
- Java进阶(三十三)java基础-filter
- My Debug