TensorFlow notes: tf.train.Supervisor() and tf.train.Saver()


1. tf.train.Supervisor()

import tensorflow as tf
import numpy as np
import os

log_path = 'ckptdir/'
log_name = 'liner.ckpt'

x_data = np.random.rand(100).astype(np.float32)
y_data = x_data*0.1 + 0.3

w = tf.Variable(tf.random_normal([1]))
b = tf.Variable(tf.zeros([1]))
y = w*x_data + b

loss = tf.reduce_mean(tf.square(y - y_data))
train = tf.train.AdamOptimizer(0.5).minimize(loss)
tf.summary.scalar('loss', loss)

saver = tf.train.Saver()  # overridden below by the Supervisor's own Saver
init = tf.global_variables_initializer()
merged = tf.summary.merge_all()

sv = tf.train.Supervisor(logdir=log_path, init_op=init)  # logdir is where checkpoints and summaries are saved
saver = sv.saver  # use the Supervisor's built-in Saver

with sv.managed_session() as sess:  # automatically looks for a checkpoint in logdir; if none is found, runs the init op
    # no longer needed -- the Supervisor handles initialization and restoring:
    # sess.run(init)
    # if len(os.listdir(log_path)) != 0:
    #     saver.restore(sess, os.path.join(log_path, log_name))
    for step in range(201):
        sess.run(train)
        if step % 50 == 0:
            print(step, sess.run(w), sess.run(b))
            merged_summary = sess.run(merged)
            sv.summary_computed(sess, merged_summary, global_step=step)
    saver.save(sess, os.path.join(log_path, 'liner.ckpt'))

As the code above shows, the Supervisor takes care of several things for us:
(1) It automatically restores variables from a checkpoint in logdir if one exists, and initializes them otherwise.
(2) It carries its own Saver, which can be used to save checkpoints.
(3) It provides summary_computed() for writing summaries.
As a result, we no longer need to:
(1) initialize variables manually or restore them from a checkpoint ourselves;
(2) create our own Saver, since the one inside sv is enough;
(3) create a summary writer.
A sketch of letting the Supervisor handle periodic saving on its own follows below.
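Beyond summary_computed, a Supervisor can also write checkpoints and summaries on its own timer. The following is a minimal sketch reusing loss, init, merged, and log_path from the listing above; the intervals passed to save_model_secs and save_summaries_secs are illustrative assumptions, not values taken from the original example.

# Minimal sketch: let the Supervisor save checkpoints and summaries periodically.
# The interval values below are assumptions chosen for illustration.
global_step = tf.Variable(0, name='global_step', trainable=False)
train = tf.train.AdamOptimizer(0.5).minimize(loss, global_step=global_step)
init = tf.global_variables_initializer()  # recreated here so global_step is initialized too

sv = tf.train.Supervisor(logdir=log_path,
                         init_op=init,
                         summary_op=merged,        # Supervisor runs and writes this op in a background thread
                         global_step=global_step,
                         save_model_secs=60,       # write a checkpoint roughly every 60 seconds
                         save_summaries_secs=30)   # write summaries roughly every 30 seconds

with sv.managed_session() as sess:
    for step in range(201):
        if sv.should_stop():
            break
        sess.run(train)

With this setup there is no need to call summary_computed or saver.save explicitly; the Supervisor's background threads handle both.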
2. tf.train.Saver()

import tensorflow as tf
import numpy as np
import os

log_path = 'ckptdir'
log_name = 'liner.ckpt'

x_data = np.random.rand(100).astype(np.float32)
y_data = x_data*0.1 + 0.3

w = tf.Variable(tf.random_normal([1]))
b = tf.Variable(tf.zeros([1]))
y = w*x_data + b

loss = tf.reduce_mean(tf.square(y - y_data))
train = tf.train.AdamOptimizer(0.5).minimize(loss)
tf.summary.scalar('loss', loss)

saver = tf.train.Saver()
init = tf.global_variables_initializer()
merged = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(init)
    # latest_checkpoint expects the checkpoint directory, not a checkpoint file path
    checkpoint = tf.train.latest_checkpoint(log_path)
    if checkpoint:
        print("loading model from checkpoint")
        saver.restore(sess, checkpoint)
    # if len(os.listdir(log_path)) != 0:
    #     saver.restore(sess, os.path.join(log_path, log_name))
    for step in range(201):
        sess.run(train)
        if step % 50 == 0:
            print(step, sess.run(w), sess.run(b))
    # write the graph and a final summary by hand
    summary_writer = tf.summary.FileWriter(log_path, sess.graph)
    summary_all = sess.run(merged)
    summary_writer.add_summary(summary_all)
    summary_writer.close()
    saver.save(sess, os.path.join(log_path, 'liner.ckpt'))
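For completeness, here is a minimal sketch (not part of the original code) of how the checkpoint written above could be restored later, for example in a separate evaluation script. It assumes the variables are recreated in the same order as during training so that their default names match the names stored in the checkpoint.

# Minimal restore sketch: rebuild the model variables, then load the latest checkpoint.
import tensorflow as tf

log_path = 'ckptdir'

# Recreate w and b in the same order as in training so their default names
# ("Variable", "Variable_1") match those saved in the checkpoint.
w = tf.Variable(tf.random_normal([1]))
b = tf.Variable(tf.zeros([1]))

saver = tf.train.Saver([w, b])  # restore only the model variables, not the optimizer state

with tf.Session() as sess:
    checkpoint = tf.train.latest_checkpoint(log_path)  # expects the checkpoint directory
    if checkpoint:
        saver.restore(sess, checkpoint)
        print('restored w =', sess.run(w), 'b =', sess.run(b))
    else:
        print('no checkpoint found in', log_path)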