Training TensorFlow on your own data (selectively restoring weights) (26) --- Deep Learning
Fine-tune the pre-trained inception_v3 model on the generated batch data, and then train it.
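Both listings below depend on a helper module, img_convert, that is not shown in this post. Judging from how it is called, load_data(o_dir, num_classes, batch_size) is expected to return a list of (images, one-hot labels) batches and data_lrn to normalize the pixel values. The following is only an illustrative sketch of such a module; the real img_convert may look different.

# img_convert.py -- illustrative sketch only; the original module is not shown in the post.
import os
import numpy as np
from PIL import Image

def load_data(o_dir, num_classes, batch_size):
    """Assumes o_dir holds one sub-directory per class; returns a list of
    (images, one_hot_labels) batches with images resized to 299x299."""
    images, labels = [], []
    for label, class_name in enumerate(sorted(os.listdir(o_dir))):
        class_dir = os.path.join(o_dir, class_name)
        if not os.path.isdir(class_dir):
            continue
        for fname in os.listdir(class_dir):
            img = Image.open(os.path.join(class_dir, fname)).convert('RGB').resize((299, 299))
            images.append(np.asarray(img, dtype=np.float32))
            one_hot = np.zeros(num_classes, dtype=np.float32)
            one_hot[label] = 1.0
            labels.append(one_hot)
    batches = []
    for i in range(0, len(images), batch_size):
        batches.append((np.stack(images[i:i + batch_size]),
                        np.stack(labels[i:i + batch_size])))
    return batches

def data_lrn(batches):
    """Scales pixel values to [-1, 1], the range Inception V3 expects."""
    return [((imgs / 127.5) - 1.0, labels) for imgs, labels in batches]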
1) Restoring the weights with slim.assign_from_checkpoint_fn
#-*-coding=utf-8-*-
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
#from tensorflow.contrib.slim.nets.inception import inception_v3, inception_v3_arg_scope
import numpy as np
import os
import img_convert

height = 299
width = 299
channels = 3
num_classes = 1001

X = tf.placeholder(tf.float32, shape=[None, height, width, channels])
y = tf.placeholder(tf.float32, shape=[None, 182])

with slim.arg_scope(nets.inception.inception_v3_arg_scope()):
    logits, end_points = nets.inception.inception_v3(X, num_classes=num_classes, is_training=False)
    # Flatten the 1001-way ImageNet logits and stack a new 182-class fully connected layer on top.
    shape = logits.get_shape().as_list()
    dim = 1
    for d in shape[1:]:
        dim *= d
    fc_ = tf.reshape(logits, [-1, dim])
    fc0_weights = tf.get_variable(name="fc0_weights", shape=(1001, 182),
                                  initializer=tf.contrib.layers.xavier_initializer())
    fc0_biases = tf.get_variable(name="fc0_biases", shape=(182,),
                                 initializer=tf.contrib.layers.xavier_initializer())
    logits_ = tf.nn.bias_add(tf.matmul(fc_, fc0_weights), fc0_biases)
    predictions = tf.nn.softmax(logits_)
    #cross_entropy = -tf.reduce_sum(y*tf.log(predictions))
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits_))
    train_step = tf.train.GradientDescentOptimizer(1e-6).minimize(cross_entropy)
    correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))
    #acc = tf.reduce_sum(tf.cast(correct_pred, tf.float32))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

with tf.Session() as sess:
    o_dir = "E:/test"
    num_classes = 182
    batch_size = 3
    epoches = 2
    batches = img_convert.data_lrn(img_convert.load_data(o_dir, num_classes, batch_size))
    # Build a restore function for the pre-trained Inception V3 weights;
    # ignore_missing_vars=True skips any variable that is not stored in the checkpoint.
    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join("E:/", 'inception_v3.ckpt'),
        slim.get_model_variables(),
        ignore_missing_vars=True)
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    init_fn(sess)
    for epoch in range(epoches):
        for batch in batches:
            sess.run(train_step, feed_dict={X: batch[0], y: batch[1]})
        # Evaluate on the first batch.
        acc = sess.run(accuracy, feed_dict={X: batches[0][0], y: batches[0][1]})
        print(acc)
    print("Done")
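When deciding which variables assign_from_checkpoint_fn (or a Saver) can actually match, it helps to list what is stored in inception_v3.ckpt. A small sketch using tf.train.NewCheckpointReader, assuming the same checkpoint path as above:

import tensorflow as tf

# Print every variable name and shape stored in the pre-trained checkpoint,
# so the graph's variable names can be checked against it before restoring.
reader = tf.train.NewCheckpointReader("E:/inception_v3.ckpt")
for name, shape in sorted(reader.get_variable_to_shape_map().items()):
    print(name, shape)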
2) Restoring the weights with saver.restore
Original version:
#-*-coding=utf-8-*-
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
#from tensorflow.contrib.slim.nets.inception import inception_v3, inception_v3_arg_scope
import numpy as np
import os
import img_convert

height = 299
width = 299
channels = 3
num_classes = 1001

X = tf.placeholder(tf.float32, shape=[None, height, width, channels])
y = tf.placeholder(tf.float32, shape=[None, 182])

with slim.arg_scope(nets.inception.inception_v3_arg_scope()):
    logits, end_points = nets.inception.inception_v3(X, num_classes=num_classes, is_training=False)
    # Collect the variables to restore, trying to exclude the Mixed_7c block.
    exclude = []
    variables_to_restore = slim.get_variables_to_restore(exclude=['Mixed_7c'])
    '''
    for v in variables_to_restore:
        if 'Mixed_7c' not in v.name:
            exclude.append(v)
    '''
    shape = logits.get_shape().as_list()
    dim = 1
    for d in shape[1:]:
        dim *= d
    fc_ = tf.reshape(logits, [-1, dim])
    fc0_weights = tf.get_variable(name="fc0_weights", shape=(1001, 182),
                                  initializer=tf.contrib.layers.xavier_initializer())
    fc0_biases = tf.get_variable(name="fc0_biases", shape=(182,),
                                 initializer=tf.contrib.layers.xavier_initializer())
    logits_ = tf.nn.bias_add(tf.matmul(fc_, fc0_weights), fc0_biases)
    predictions = tf.nn.softmax(logits_)
    #cross_entropy = -tf.reduce_sum(y*tf.log(predictions))
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits_))
    train_step = tf.train.GradientDescentOptimizer(1e-6).minimize(cross_entropy)
    correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))
    #acc = tf.reduce_sum(tf.cast(correct_pred, tf.float32))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

with tf.Session() as sess:
    o_dir = "E:/test"
    num_classes = 182
    batch_size = 3
    epoches = 2
    batches = img_convert.data_lrn(img_convert.load_data(o_dir, num_classes, batch_size))
    sess.run(tf.global_variables_initializer())
    # Load only the weights we want to restore and drop the ones we do not need.
    saver = tf.train.Saver(variables_to_restore)
    saver.restore(sess, os.path.join("E:\\", "inception_v3.ckpt"))
    for epoch in range(epoches):
        for batch in batches:
            sess.run(train_step, feed_dict={X: batch[0], y: batch[1]})
        # Evaluate on the first batch.
        acc = sess.run(accuracy, feed_dict={X: batches[0][0], y: batches[0][1]})
        print(acc)
    print("Done")
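A note on the exclude argument: slim.get_variables_to_restore matches each entry of exclude as a variable-scope prefix. In this graph the Inception V3 variables live under names like 'InceptionV3/Mixed_7c/...', so passing just 'Mixed_7c' most likely excludes nothing and the whole network is restored anyway. A minimal sketch of the prefix-based alternative, assuming the default 'InceptionV3' scope name:

import tensorflow.contrib.slim as slim

# Hypothetical replacement for the get_variables_to_restore line above:
# exclude Mixed_7c by its fully scoped prefix instead of the bare block name.
variables_to_restore = slim.get_variables_to_restore(exclude=['InceptionV3/Mixed_7c'])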
Improved version: instead of relying on exclude, filter the restore list manually by variable-name substring, so every variable whose name contains 'Mixed_7c' is reliably dropped before the Saver is built.
#-*-coding=utf-8-*-
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
#from tensorflow.contrib.slim.nets.inception import inception_v3, inception_v3_arg_scope
import numpy as np
import os
import img_convert

height = 299
width = 299
channels = 3
num_classes = 1001

X = tf.placeholder(tf.float32, shape=[None, height, width, channels])
y = tf.placeholder(tf.float32, shape=[None, 182])

with slim.arg_scope(nets.inception.inception_v3_arg_scope()):
    logits, end_points = nets.inception.inception_v3(X, num_classes=num_classes, is_training=False)
    # Keep only the variables whose names do not contain 'Mixed_7c'.
    variables_to_restore_t = []
    variables_to_restore = slim.get_variables_to_restore()
    for v in variables_to_restore:
        if 'Mixed_7c' not in v.name:
            variables_to_restore_t.append(v)
    variables_to_restore = variables_to_restore_t
    for v in variables_to_restore:
        print(v.name)
    shape = logits.get_shape().as_list()
    dim = 1
    for d in shape[1:]:
        dim *= d
    fc_ = tf.reshape(logits, [-1, dim])
    fc0_weights = tf.get_variable(name="fc0_weights", shape=(1001, 182),
                                  initializer=tf.contrib.layers.xavier_initializer())
    fc0_biases = tf.get_variable(name="fc0_biases", shape=(182,),
                                 initializer=tf.contrib.layers.xavier_initializer())
    logits_ = tf.nn.bias_add(tf.matmul(fc_, fc0_weights), fc0_biases)
    predictions = tf.nn.softmax(logits_)
    #cross_entropy = -tf.reduce_sum(y*tf.log(predictions))
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits_))
    train_step = tf.train.GradientDescentOptimizer(1e-6).minimize(cross_entropy)
    correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))
    #acc = tf.reduce_sum(tf.cast(correct_pred, tf.float32))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

with tf.Session() as sess:
    o_dir = "E:/test"
    num_classes = 182
    batch_size = 3
    epoches = 2
    batches = img_convert.data_lrn(img_convert.load_data(o_dir, num_classes, batch_size))
    sess.run(tf.global_variables_initializer())
    # Restore only the filtered variable list; Mixed_7c keeps its fresh initialization.
    saver = tf.train.Saver(variables_to_restore)
    saver.restore(sess, os.path.join("E:\\", "inception_v3.ckpt"))
    for epoch in range(epoches):
        for batch in batches:
            sess.run(train_step, feed_dict={X: batch[0], y: batch[1]})
        # Evaluate on the first batch.
        acc = sess.run(accuracy, feed_dict={X: batches[0][0], y: batches[0][1]})
        print(acc)
    print("Done")
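After fine-tuning, the adapted weights (including the new fc0 layer) can be written back out with a fresh Saver so training does not have to be repeated. A minimal sketch, to be placed inside the `with tf.Session() as sess:` block after the training loop; the output path E:/finetuned/ is hypothetical:

# Save every variable in the current graph, including the new fc0_* layer.
saver_out = tf.train.Saver()  # defaults to all saveable variables
save_path = saver_out.save(sess, "E:/finetuned/inception_v3_182.ckpt")
print("Fine-tuned model written to", save_path)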