TensorFlow Learning Essentials, Part II

This post covers the basic usage pattern of TensorFlow.
The file used is fully_connected_feed.py. The companion file mnist.py already ships with the TensorFlow installation, so it does not need to be downloaded separately.
Following the usual convention, the full code comes first, then the analysis.

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the MNIST network using a feed dictionary."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=missing-docstring
import argparse
import os.path
import sys
import time

from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist

# Basic model parameters as external flags.
FLAGS = None


def placeholder_inputs(batch_size):
  """Generate placeholder variables to represent the input tensors.

  These placeholders are used as inputs by the rest of the model building
  code and will be fed from the downloaded data in the .run() loop, below.

  Args:
    batch_size: The batch size will be baked into both placeholders.

  Returns:
    images_placeholder: Images placeholder.
    labels_placeholder: Labels placeholder.
  """
  # Note that the shapes of the placeholders match the shapes of the full
  # image and label tensors, except the first dimension is now batch_size
  # rather than the full size of the train or test data sets.
  images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
                                                         mnist.IMAGE_PIXELS))
  labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
  return images_placeholder, labels_placeholder


def fill_feed_dict(data_set, images_pl, labels_pl):
  """Fills the feed_dict for training the given step.

  A feed_dict takes the form of:
  feed_dict = {
      <placeholder>: <tensor of values to be passed for placeholder>,
      ....
  }

  Args:
    data_set: The set of images and labels, from input_data.read_data_sets()
    images_pl: The images placeholder, from placeholder_inputs().
    labels_pl: The labels placeholder, from placeholder_inputs().

  Returns:
    feed_dict: The feed dictionary mapping from placeholders to values.
  """
  # Create the feed_dict for the placeholders filled with the next
  # `batch size` examples.
  images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,
                                                 FLAGS.fake_data)
  feed_dict = {
      images_pl: images_feed,
      labels_pl: labels_feed,
  }
  return feed_dict


def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set):
  """Runs one evaluation against the full epoch of data.

  Args:
    sess: The session in which the model has been trained.
    eval_correct: The Tensor that returns the number of correct predictions.
    images_placeholder: The images placeholder.
    labels_placeholder: The labels placeholder.
    data_set: The set of images and labels to evaluate, from
      input_data.read_data_sets().
  """
  # And run one epoch of eval.
  true_count = 0  # Counts the number of correct predictions.
  # "//" is floor (integer) division.
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  num_examples = steps_per_epoch * FLAGS.batch_size
  for step in xrange(steps_per_epoch):
    feed_dict = fill_feed_dict(data_set,
                               images_placeholder,
                               labels_placeholder)
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  precision = float(true_count) / num_examples
  print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
        (num_examples, true_count, precision))


def run_training():
  """Train MNIST for a number of steps."""
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)

  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder = placeholder_inputs(
        FLAGS.batch_size)

    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images_placeholder,
                             FLAGS.hidden1,
                             FLAGS.hidden2)

    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)

    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, FLAGS.learning_rate)

    ###################### Graph construction is complete at this point ######################

    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)

    # Build the summary Tensor based on the TF collection of Summaries.
    summary = tf.summary.merge_all()

    # Add the variable initializer Op.
    init = tf.global_variables_initializer()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()

    # Create a session for running Ops on the Graph.
    sess = tf.Session()

    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

    ###################### Everything is now configured ######################

    # Run the Op to initialize the variables.
    sess.run(init)

    # Start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()

      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = fill_feed_dict(data_sets.train,
                                 images_placeholder,
                                 labels_placeholder)

      _, loss_value = sess.run([train_op, loss],
                               feed_dict=feed_dict)

      duration = time.time() - start_time

      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()

      # Run an evaluation once every thousand steps.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
        saver.save(sess, checkpoint_file, global_step=step)
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.test)


def main(_):
  if tf.gfile.Exists(FLAGS.log_dir):
    tf.gfile.DeleteRecursively(FLAGS.log_dir)
  tf.gfile.MakeDirs(FLAGS.log_dir)
  run_training()


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--learning_rate',
      type=float,
      default=0.01,
      help='Initial learning rate.'
  )
  parser.add_argument(
      '--max_steps',
      type=int,
      default=2000,
      help='Number of steps to run trainer.'
  )
  parser.add_argument(
      '--hidden1',
      type=int,
      default=128,
      help='Number of units in hidden layer 1.'
  )
  parser.add_argument(
      '--hidden2',
      type=int,
      default=32,
      help='Number of units in hidden layer 2.'
  )
  parser.add_argument(
      '--batch_size',
      type=int,
      default=100,
      help='Batch size.  Must divide evenly into the dataset sizes.'
  )
  parser.add_argument(
      '--input_data_dir',
      type=str,
      default='/tmp/tensorflow/mnist/input_data',
      help='Directory to put the input data.'
  )
  parser.add_argument(
      '--log_dir',
      type=str,
      default='/tmp/tensorflow/mnist/logs/fully_connected_feed',
      help='Directory to put the log data.'
  )
  parser.add_argument(
      '--fake_data',
      default=False,
      help='If true, uses fake data for unit testing.',
      action='store_true'
  )
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)

Key points:

Building the model

  • inference: builds the graph directly.
    Inside inference, each layer is constructed under a scope such as with tf.name_scope('hidden1'):, for example (a fuller layer sketch follows this list):

    weights = tf.Variable(
        tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
                            stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
        name='weights')
    biases = tf.Variable(tf.zeros([hidden1_units]),
                         name='biases')

    This way, the weights of this layer are given the name hidden1/weights.

  • loss: adds the loss node. (With the TensorFlow 1.x API used throughout this post, the cross-entropy op must be called with named arguments.)

    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
  • training: adds the training node.

    def training(loss, learning_rate):
      # Add a scalar summary for the snapshot loss.
      tf.summary.scalar('loss', loss)
      # Create the gradient descent optimizer with the given learning rate.
      optimizer = tf.train.GradientDescentOptimizer(learning_rate)
      # Create a variable to track the global step.
      global_step = tf.Variable(0, name='global_step', trainable=False)
      # Use the optimizer to apply the gradients that minimize the loss
      # (and also increment the global step counter) as a single training step.
      train_op = optimizer.minimize(loss, global_step=global_step)
      return train_op
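
Referring back to the inference bullet above, here is a sketch of how one hidden layer can be wrapped in a name scope, following the pattern of the tutorial's mnist.py. The function name hidden1_layer is an illustrative assumption; only the weights/biases construction is taken verbatim from the snippet above, and the ReLU activation is assumed as in the tutorial's hidden layers.

import math
import tensorflow as tf

IMAGE_PIXELS = 28 * 28  # flattened MNIST image size, as in mnist.py

def hidden1_layer(images, hidden1_units):
  # Every node created inside this scope gets the 'hidden1/' prefix,
  # so the variable below shows up as hidden1/weights in the graph.
  with tf.name_scope('hidden1'):
    weights = tf.Variable(
        tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
                            stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
        name='weights')
    biases = tf.Variable(tf.zeros([hidden1_units]), name='biases')
    # ReLU activation on the affine transform of the input images.
    return tf.nn.relu(tf.matmul(images, weights) + biases)

Printing weights.name after building the layer gives 'hidden1/weights:0', which is the name under which the variable appears in TensorBoard's graph view.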

Training the model

with tf.Graph().as_default():
    images_placeholder, labels_placeholder = placeholder_inputs(
        FLAGS.batch_size)

    # The three model-building steps
    logits = mnist.inference(images_placeholder,
                             FLAGS.hidden1,
                             FLAGS.hidden2)
    loss = mnist.loss(logits, labels_placeholder)
    train_op = mnist.training(loss, FLAGS.learning_rate)

    # Extra setup: add the evaluation node, create the summaries, and add checkpoints.
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    summary = tf.summary.merge_all()
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    sess = tf.Session()
    summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
    sess.run(init)

    # Now the train loop.
    # One run is split into max_steps batches; for each batch,
    # fill_feed_dict supplies that batch's data.
    for step in xrange(FLAGS.max_steps):
      # ------------ Main body ------------
      start_time = time.time()
      feed_dict = fill_feed_dict(data_sets.train,
                                 images_placeholder,
                                 labels_placeholder)
      # Fetching just the loss value alongside train_op is enough.
      _, loss_value = sess.run([train_op, loss],
                               feed_dict=feed_dict)
      duration = time.time() - start_time

      # ---------- Write intermediate results and print a status line ----------
      # Do this fairly frequently, since TensorBoard can display it.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()

      # ---------- Save an intermediate model and evaluate; this need not be as frequent ----------
      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
        saver.save(sess, checkpoint_file, global_step=step)
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.test)
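
The eval_correct op fetched inside do_eval comes from mnist.evaluation, which is not reproduced in this post. Roughly, it counts how many predictions in a batch are correct using tf.nn.in_top_k; a minimal sketch along the lines of the tutorial's mnist.py:

def evaluation(logits, labels):
  # True for each example whose top-scoring logit matches its label.
  correct = tf.nn.in_top_k(logits, labels, 1)
  # Number of correct predictions in this batch, as an int32 scalar.
  return tf.reduce_sum(tf.cast(correct, tf.int32))

do_eval sums this count over steps_per_epoch batches and divides by num_examples, giving the "Precision @ 1" printed above. The summaries and checkpoints written to FLAGS.log_dir (by default /tmp/tensorflow/mnist/logs/fully_connected_feed) can be inspected with tensorboard --logdir=/tmp/tensorflow/mnist/logs/fully_connected_feed.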