TensorFlow: Training on Your Own Data (Selectively Restoring Weights) (26) --- Deep Learning


Here we fine-tune the pretrained inception_v3 model on our own batched data: restore the pretrained weights, attach a new 182-class output layer, and then train.
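Both scripts below depend on a helper module img_convert from the earlier posts in this series, which is not shown here. For reference, a hypothetical stub of the interface the scripts appear to assume (the shapes, normalization, and directory layout are guesses, not the author's actual code):

# Hypothetical stand-in for the author's img_convert module.
import numpy as np

def load_data(o_dir, num_classes, batch_size):
    # Assumed to read images under o_dir and return a list of
    # (images, one_hot_labels) batches.
    images = np.zeros((batch_size, 299, 299, 3), dtype=np.float32)
    labels = np.eye(num_classes, dtype=np.float32)[:batch_size]
    return [(images, labels)]

def data_lrn(batches):
    # Assumed normalization: scale pixels to [-1, 1], which is what
    # inception_v3 expects.
    return [(imgs / 127.5 - 1.0, labels) for imgs, labels in batches]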

1) Restoring with slim.assign_from_checkpoint_fn

# -*- coding: utf-8 -*-
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
import numpy as np
import os
import img_convert

height = 299
width = 299
channels = 3
num_classes = 1001  # the checkpoint was trained with 1001 ImageNet classes

X = tf.placeholder(tf.float32, shape=[None, height, width, channels])
y = tf.placeholder(tf.float32, shape=[None, 182])

with slim.arg_scope(nets.inception.inception_v3_arg_scope()):
    logits, end_points = nets.inception.inception_v3(
        X, num_classes=num_classes, is_training=False)

    # Flatten the 1001-way logits and add a new fully connected layer that
    # maps them to our 182 classes.
    shape = logits.get_shape().as_list()
    dim = 1
    for d in shape[1:]:
        dim *= d
    fc_ = tf.reshape(logits, [-1, dim])
    fc0_weights = tf.get_variable(
        name="fc0_weights", shape=(1001, 182),
        initializer=tf.contrib.layers.xavier_initializer())
    fc0_biases = tf.get_variable(
        name="fc0_biases", shape=(182,),
        initializer=tf.contrib.layers.xavier_initializer())
    logits_ = tf.nn.bias_add(tf.matmul(fc_, fc0_weights), fc0_biases)
    predictions = tf.nn.softmax(logits_)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits_))
    train_step = tf.train.GradientDescentOptimizer(1e-6).minimize(cross_entropy)

    correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

with tf.Session() as sess:
    o_dir = "E:/test"
    num_classes = 182
    batch_size = 3
    epochs = 2
    batches = img_convert.data_lrn(
        img_convert.load_data(o_dir, num_classes, batch_size))

    # Build a function that restores the pretrained weights;
    # ignore_missing_vars=True skips any listed variable that is not
    # present in the checkpoint instead of raising an error.
    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join("E:/", 'inception_v3.ckpt'),
        slim.get_model_variables(),
        ignore_missing_vars=True)

    # Initialize everything first, then overwrite with the checkpoint values.
    sess.run(tf.global_variables_initializer())
    init_fn(sess)

    for epoch in range(epochs):
        for batch in batches:
            sess.run(train_step, feed_dict={X: batch[0], y: batch[1]})
    # Evaluate on the first batch of images and their matching labels.
    acc = sess.run(accuracy, feed_dict={X: batches[0][0], y: batches[0][1]})
    print(acc)
    print("Done")
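When deciding what to restore or exclude, it helps to see exactly which variables the checkpoint contains. A small sketch using tf.train.NewCheckpointReader (standard TF 1.x API; the checkpoint path is the same one used above):

import tensorflow as tf

# Print every variable stored in the pretrained checkpoint with its shape;
# note that the names all carry the 'InceptionV3/' scope prefix.
reader = tf.train.NewCheckpointReader("E:/inception_v3.ckpt")
for name, shape in sorted(reader.get_variable_to_shape_map().items()):
    print(name, shape)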

2) Restoring with saver.restore

Original version:

# -*- coding: utf-8 -*-
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
import numpy as np
import os
import img_convert

height = 299
width = 299
channels = 3
num_classes = 1001  # the checkpoint was trained with 1001 ImageNet classes

X = tf.placeholder(tf.float32, shape=[None, height, width, channels])
y = tf.placeholder(tf.float32, shape=[None, 182])

with slim.arg_scope(nets.inception.inception_v3_arg_scope()):
    logits, end_points = nets.inception.inception_v3(
        X, num_classes=num_classes, is_training=False)

    # Intended to restore everything except the Mixed_7c layer
    # (see the note after this listing).
    variables_to_restore = slim.get_variables_to_restore(exclude=['Mixed_7c'])

    # Flatten the 1001-way logits and add a new fully connected layer that
    # maps them to our 182 classes.
    shape = logits.get_shape().as_list()
    dim = 1
    for d in shape[1:]:
        dim *= d
    fc_ = tf.reshape(logits, [-1, dim])
    fc0_weights = tf.get_variable(
        name="fc0_weights", shape=(1001, 182),
        initializer=tf.contrib.layers.xavier_initializer())
    fc0_biases = tf.get_variable(
        name="fc0_biases", shape=(182,),
        initializer=tf.contrib.layers.xavier_initializer())
    logits_ = tf.nn.bias_add(tf.matmul(fc_, fc0_weights), fc0_biases)
    predictions = tf.nn.softmax(logits_)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits_))
    train_step = tf.train.GradientDescentOptimizer(1e-6).minimize(cross_entropy)

    correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

with tf.Session() as sess:
    o_dir = "E:/test"
    num_classes = 182
    batch_size = 3
    epochs = 2
    batches = img_convert.data_lrn(
        img_convert.load_data(o_dir, num_classes, batch_size))

    sess.run(tf.global_variables_initializer())
    # Restore only the selected weights; anything excluded keeps the fresh
    # initialization from global_variables_initializer.
    saver = tf.train.Saver(variables_to_restore)
    saver.restore(sess, os.path.join("E:\\", "inception_v3.ckpt"))

    for epoch in range(epochs):
        for batch in batches:
            sess.run(train_step, feed_dict={X: batch[0], y: batch[1]})
    # Evaluate on the first batch of images and their matching labels.
    acc = sess.run(accuracy, feed_dict={X: batches[0][0], y: batches[0][1]})
    print(acc)
    print("Done")
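The exclude argument of slim.get_variables_to_restore filters by variable scope prefix, and every inception_v3 variable is created under the 'InceptionV3/' scope, so the bare 'Mixed_7c' above most likely excludes nothing. A sketch of the fully qualified form, assuming the standard scope name:

# Exclude by the fully qualified scope prefix rather than the bare layer name.
variables_to_restore = slim.get_variables_to_restore(
    exclude=['InceptionV3/Mixed_7c'])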

Improved version:

# -*- coding: utf-8 -*-
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
import numpy as np
import os
import img_convert

height = 299
width = 299
channels = 3
num_classes = 1001  # the checkpoint was trained with 1001 ImageNet classes

X = tf.placeholder(tf.float32, shape=[None, height, width, channels])
y = tf.placeholder(tf.float32, shape=[None, 182])

with slim.arg_scope(nets.inception.inception_v3_arg_scope()):
    logits, end_points = nets.inception.inception_v3(
        X, num_classes=num_classes, is_training=False)

    # Manually filter out every variable whose name contains 'Mixed_7c', so
    # that layer is retrained from scratch while everything else is restored.
    variables_to_restore_t = []
    variables_to_restore = slim.get_variables_to_restore()
    for v in variables_to_restore:
        if 'Mixed_7c' not in v.name:
            variables_to_restore_t.append(v)
    variables_to_restore = variables_to_restore_t
    for v in variables_to_restore:
        print(v.name)

    # Flatten the 1001-way logits and add a new fully connected layer that
    # maps them to our 182 classes.
    shape = logits.get_shape().as_list()
    dim = 1
    for d in shape[1:]:
        dim *= d
    fc_ = tf.reshape(logits, [-1, dim])
    fc0_weights = tf.get_variable(
        name="fc0_weights", shape=(1001, 182),
        initializer=tf.contrib.layers.xavier_initializer())
    fc0_biases = tf.get_variable(
        name="fc0_biases", shape=(182,),
        initializer=tf.contrib.layers.xavier_initializer())
    logits_ = tf.nn.bias_add(tf.matmul(fc_, fc0_weights), fc0_biases)
    predictions = tf.nn.softmax(logits_)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits_))
    train_step = tf.train.GradientDescentOptimizer(1e-6).minimize(cross_entropy)

    correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

with tf.Session() as sess:
    o_dir = "E:/test"
    num_classes = 182
    batch_size = 3
    epochs = 2
    batches = img_convert.data_lrn(
        img_convert.load_data(o_dir, num_classes, batch_size))

    sess.run(tf.global_variables_initializer())
    # Restore only the filtered list; the Mixed_7c variables keep the fresh
    # initialization from global_variables_initializer.
    saver = tf.train.Saver(variables_to_restore)
    saver.restore(sess, os.path.join("E:\\", "inception_v3.ckpt"))

    for epoch in range(epochs):
        for batch in batches:
            sess.run(train_step, feed_dict={X: batch[0], y: batch[1]})
    # Evaluate on the first batch of images and their matching labels.
    acc = sess.run(accuracy, feed_dict={X: batches[0][0], y: batches[0][1]})
    print(acc)
    print("Done")
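Note that both versions call minimize without a var_list, so gradient descent updates every trainable variable, including the restored inception weights. If the intent is to train only the new 182-way classifier, a minimal sketch using the fc0_weights and fc0_biases defined above:

# Restrict the optimizer to the new FC layer; the restored inception
# weights then stay frozen during training.
train_step = tf.train.GradientDescentOptimizer(1e-6).minimize(
    cross_entropy, var_list=[fc0_weights, fc0_biases])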