Handwritten digit recognition with Caffe on Ubuntu, and testing via the Python interface


1. The script mnist.py below generates the related files (train.prototxt, test.prototxt, solver.prototxt, etc.) and then trains the model. Thanks to these two bloggers:

http://www.cnblogs.com/denny402/p/5684431.html
http://blog.csdn.net/houwenbin1986/article/details/52956101#

# -*- coding: utf-8 -*-
import sys
sys.path.append('/home/xhj/caffe/python')   # path to the Caffe Python bindings
import caffe
from caffe import layers as L, params as P, proto, to_proto

# File paths
root = '/home/xhj/caffe/examples/mnist/trainList/mnist/'   # root directory
train_list = root + 'mnist/train/train.txt'     # list of training images
test_list = root + 'mnist/test/test.txt'        # list of test images
train_proto = root + 'mnist/train.prototxt'     # training network definition
test_proto = root + 'mnist/test.prototxt'       # test network definition
solver_proto = root + 'mnist/solver.prototxt'   # solver parameter file

# Generate the network definition (prototxt)
def Lenet(img_list, batch_size, include_acc=False):
    # Layer 1: data layer, reads images through the ImageData layer
    data, label = L.ImageData(source=img_list, batch_size=batch_size, ntop=2, root_folder=root,
        transform_param=dict(scale=0.00390625))
    # Convolution layer
    conv1 = L.Convolution(data, kernel_size=5, stride=1, num_output=20, pad=0, weight_filler=dict(type='xavier'))
    # Pooling layer
    pool1 = L.Pooling(conv1, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    # Convolution layer
    conv2 = L.Convolution(pool1, kernel_size=5, stride=1, num_output=50, pad=0, weight_filler=dict(type='xavier'))
    # Pooling layer
    pool2 = L.Pooling(conv2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    # Fully connected layer
    fc3 = L.InnerProduct(pool2, num_output=500, weight_filler=dict(type='xavier'))
    # Activation layer
    relu3 = L.ReLU(fc3, in_place=True)
    # Fully connected layer
    fc4 = L.InnerProduct(relu3, num_output=10, weight_filler=dict(type='xavier'))
    # Softmax loss layer
    loss = L.SoftmaxWithLoss(fc4, label)
    if include_acc:             # the test phase needs an Accuracy layer
        acc = L.Accuracy(fc4, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)

def write_net():
    # Write train.prototxt
    with open(train_proto, 'w') as f:
        f.write(str(Lenet(train_list, batch_size=64)))
    # Write test.prototxt
    with open(test_proto, 'w') as f:
        f.write(str(Lenet(test_list, batch_size=100, include_acc=True)))

# Generate the solver parameter file
def gen_solver(solver_file, train_net, test_net):
    s = proto.caffe_pb2.SolverParameter()
    s.train_net = train_net
    s.test_net.append(test_net)
    s.test_interval = 938    # 60000/64: run a test after every full pass over the training images
    s.test_iter.append(500)  # 50000/100: number of test iterations needed to cover the whole test list
    s.max_iter = 9380        # 10 epochs (938 * 10): maximum number of training iterations
    s.base_lr = 0.01         # base learning rate
    s.momentum = 0.9         # momentum
    s.weight_decay = 5e-4    # weight decay
    s.lr_policy = 'step'     # learning rate policy
    s.stepsize = 3000        # drop the learning rate every 3000 iterations
    s.gamma = 0.1            # learning rate decay factor
    s.display = 20           # display interval (iterations)
    s.snapshot = 938         # interval between caffemodel snapshots
    s.snapshot_prefix = root + 'mnist/lenet'   # caffemodel prefix
    s.type = 'SGD'           # optimization algorithm
    s.solver_mode = proto.caffe_pb2.SolverParameter.GPU   # GPU acceleration
    # Write solver.prototxt
    with open(solver_file, 'w') as f:
        f.write(str(s))

def training(solver_proto):
    #caffe.set_device(0)
    #caffe.set_mode_gpu()
    caffe.set_mode_cpu()
    solver = caffe.SGDSolver(solver_proto)
    solver.solve()

if __name__ == '__main__':
    write_net()
    gen_solver(solver_proto, train_proto, test_proto)
    training(solver_proto)
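
Note that the ImageData layers read their images from the train.txt / test.txt lists, which mnist.py itself does not create. If you do not already have them, a minimal sketch like the following can build them, assuming the images are stored in per-digit subfolders (as the test path mnist/test/9/00479.png used later suggests); the file name make_list.py and this folder layout are assumptions, not part of the original post.

# make_list.py -- a minimal sketch (an assumption, not from the original post) that
# writes the train.txt / test.txt files expected by the ImageData layers above,
# assuming per-digit subfolders such as mnist/train/0/ ... mnist/train/9/.
import os

root = '/home/xhj/caffe/examples/mnist/trainList/mnist/'

def make_list(subset):
    img_dir = os.path.join(root, 'mnist', subset)          # e.g. .../mnist/train
    list_file = os.path.join(img_dir, subset + '.txt')     # e.g. .../mnist/train/train.txt
    with open(list_file, 'w') as f:
        for label in sorted(os.listdir(img_dir)):
            class_dir = os.path.join(img_dir, label)
            # only descend into the per-digit folders 0..9
            if not (os.path.isdir(class_dir) and label.isdigit()):
                continue
            for name in sorted(os.listdir(class_dir)):
                # ImageData expects one "relative/path/to/image label" pair per line,
                # with the path taken relative to root_folder
                f.write('mnist/%s/%s/%s %s\n' % (subset, label, name, label))

if __name__ == '__main__':
    make_list('train')
    make_list('test')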

2. After the model has been trained, generate the network definition used for recognition with mkdeploy.py:

# -*- coding: utf-8 -*-
import sys
sys.path.append('/home/xhj/caffe/python')
import caffe
from caffe import layers as L, params as P, to_proto

root = '/home/xhj/caffe/examples/mnist/trainList/mnist/'
deploy = root + 'mnist/deploy.prototxt'    # output path

def create_deploy():
    # No data layer here; the input is declared directly in the prototxt header below
    conv1 = L.Convolution(bottom='data', kernel_size=5, stride=1, num_output=20, pad=0, weight_filler=dict(type='xavier'))
    pool1 = L.Pooling(conv1, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    conv2 = L.Convolution(pool1, kernel_size=5, stride=1, num_output=50, pad=0, weight_filler=dict(type='xavier'))
    pool2 = L.Pooling(conv2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    fc3 = L.InnerProduct(pool2, num_output=500, weight_filler=dict(type='xavier'))
    relu3 = L.ReLU(fc3, in_place=True)
    fc4 = L.InnerProduct(relu3, num_output=10, weight_filler=dict(type='xavier'))
    # No Accuracy layer at deploy time; the network ends with a Softmax layer instead of SoftmaxWithLoss
    prob = L.Softmax(fc4)
    return to_proto(prob)

def write_deploy():
    with open(deploy, 'w') as f:
        f.write('name:"Lenet"\n')
        f.write('input:"data"\n')
        f.write('input_dim:1\n')
        f.write('input_dim:3\n')
        f.write('input_dim:28\n')
        f.write('input_dim:28\n')
        f.write(str(create_deploy()))

if __name__ == '__main__':
    write_deploy()
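
Before using the deploy file it can help to confirm that it parses and that the blob shapes match what the test script below expects. A minimal sanity check could look like this (the file name check_deploy.py is an assumption; the original post does not include this step):

# check_deploy.py -- a small sanity check (an assumption, not from the original post):
# load the freshly generated deploy.prototxt without weights and print the blob shapes,
# which should run from (1, 3, 28, 28) at 'data' down to (1, 10) at the final Softmax.
import sys
sys.path.append('/home/xhj/caffe/python')
import caffe

root = '/home/xhj/caffe/examples/mnist/trainList/mnist/'
deploy = root + 'mnist/deploy.prototxt'

caffe.set_mode_cpu()
net = caffe.Net(deploy, caffe.TEST)          # structure only, no caffemodel needed
for name, blob in net.blobs.items():
    print name, blob.data.shape              # e.g. data (1, 3, 28, 28) ... Softmax1 (1, 10)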
3. Then test a handwritten digit of your own with the following script:
# coding=utf-8
import caffe
import numpy as np

root = '/home/xhj/caffe/examples/mnist/trainList/mnist/'   # root directory
deploy = root + 'mnist/deploy.prototxt'                    # deploy definition
caffe_model = root + 'mnist/lenet_iter_9380.caffemodel'    # trained caffemodel
img = root + 'mnist/test/9/00479.png'                      # an arbitrary test image
labels_filename = root + 'mnist/test/labels.txt'           # maps numeric labels back to class names

net = caffe.Net(deploy, caffe_model, caffe.TEST)   # load the network and its weights

# Preprocessing
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})  # input shape (1,3,28,28)
transformer.set_transpose('data', (2,0,1))      # reorder dimensions from (28,28,3) to (3,28,28)
#transformer.set_mean('data', np.load(mean_file).mean(1).mean(1))   # no mean subtraction: training did not use one
transformer.set_raw_scale('data', 255)          # rescale values from [0,1] to [0,255]
transformer.set_channel_swap('data', (2,1,0))   # swap channels from RGB to BGR

im = caffe.io.load_image(img)                   # load the image
net.blobs['data'].data[...] = transformer.preprocess('data', im)   # apply the preprocessing and fill the data blob

# Run the forward pass
out = net.forward()

labels = np.loadtxt(labels_filename, str, delimiter='\t')   # read the class-name file
prob = net.blobs['Softmax1'].data[0].flatten()   # per-class probabilities from the final Softmax layer
print prob
order = prob.argsort()[-1]                       # index of the largest probability
print 'the class is:', labels[order]             # map that index to its class name
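
To get a rough feel for accuracy beyond a single image, the same net, transformer, and labels objects can be reused in a loop over one of the test folders. This is only a hedged sketch meant to be appended to the script above; the folder layout mnist/test/9/*.png and a labels.txt that simply lists the digits 0-9 are assumptions, not guaranteed by the original post.

# Batch test sketch (an assumption, not from the original post): append to the script
# above so that root, net, transformer, and labels are already defined, then classify
# every image in one digit folder and report how many were predicted correctly.
import os

true_label = '9'
test_dir = os.path.join(root, 'mnist/test', true_label)

correct = 0
files = sorted(os.listdir(test_dir))
for name in files:
    im = caffe.io.load_image(os.path.join(test_dir, name))
    net.blobs['data'].data[...] = transformer.preprocess('data', im)
    net.forward()
    prob = net.blobs['Softmax1'].data[0].flatten()
    if str(labels[prob.argsort()[-1]]) == true_label:
        correct += 1

print 'accuracy on digit %s: %d / %d' % (true_label, correct, len(files))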
