TensorFlow Manual: TensorBoard Visualization


1. Run mnist.py together with fully_connected_feed.py to generate the mnist_logs folder in the working directory.

mnist.py

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Builds the MNIST network.

Implements the inference/loss/training pattern for model building.

1. inference() - Builds the model as far as required for running the network
forward to make predictions.
2. loss() - Adds to the inference model the layers required to generate loss.
3. training() - Adds to the loss model the Ops required to generate and
apply gradients.

This file is used by the various "fully_connected_*.py" files and not meant to
be run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import tensorflow as tf

# The MNIST dataset has 10 classes, representing the digits 0 through 9.
NUM_CLASSES = 10

# The MNIST images are always 28x28 pixels.
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE


def inference(images, hidden1_units, hidden2_units):
  """Build the MNIST model up to where it may be used for inference.

  Args:
    images: Images placeholder, from inputs().
    hidden1_units: Size of the first hidden layer.
    hidden2_units: Size of the second hidden layer.

  Returns:
    softmax_linear: Output tensor with the computed logits.
  """
  # Create the name scope 'hidden1' (shown as one grouped node in the
  # TensorBoard graph).
  with tf.name_scope('hidden1'):
    # truncated_normal samples from a normal distribution, but redraws any
    # value that differs from the mean by more than two standard deviations,
    # so every generated value lies within two standard deviations of the mean.
    weights = tf.Variable(
        tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
                            stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
        name='weights')
    biases = tf.Variable(tf.zeros([hidden1_units]),
                         name='biases')
    hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
  # Create the name scope 'hidden2'.
  with tf.name_scope('hidden2'):
    weights = tf.Variable(
        tf.truncated_normal([hidden1_units, hidden2_units],
                            stddev=1.0 / math.sqrt(float(hidden1_units))),
        name='weights')
    biases = tf.Variable(tf.zeros([hidden2_units]),
                         name='biases')
    hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
  # Softmax regression in the name scope 'softmax_linear'.
  with tf.name_scope('softmax_linear'):
    weights = tf.Variable(
        tf.truncated_normal([hidden2_units, NUM_CLASSES],
                            stddev=1.0 / math.sqrt(float(hidden2_units))),
        name='weights')
    biases = tf.Variable(tf.zeros([NUM_CLASSES]),
                         name='biases')
    logits = tf.matmul(hidden2, weights) + biases
  return logits


def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].

  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='xentropy')
  return tf.reduce_mean(cross_entropy, name='xentropy_mean')


def training(loss, learning_rate):
  """Sets up the training Ops.

  Creates a summarizer to track the loss over time in TensorBoard.

  Creates an optimizer and applies the gradients to all trainable variables.

  The Op returned by this function is what must be passed to the
  `sess.run()` call to cause the model to train.

  Args:
    loss: Loss tensor, from loss().
    learning_rate: The learning rate to use for gradient descent.

  Returns:
    train_op: The Op for training.
  """
  # Add a scalar summary for the snapshot loss.
  # This registers the loss with TensorBoard's SCALARS dashboard.
  tf.summary.scalar('loss', loss)
  # Create the gradient descent optimizer with the given learning rate.
  optimizer = tf.train.GradientDescentOptimizer(learning_rate)
  # Create a variable to track the global step.
  global_step = tf.Variable(0, name='global_step', trainable=False)
  # Use the optimizer to apply the gradients that minimize the loss
  # (and also increment the global step counter) as a single training step.
  train_op = optimizer.minimize(loss, global_step=global_step)
  return train_op


def evaluation(logits, labels):
  """Evaluate the quality of the logits at predicting the label.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size], with values in the
      range [0, NUM_CLASSES).

  Returns:
    A scalar int32 tensor with the number of examples (out of batch_size)
    that were predicted correctly.
  """
  # For a classifier model, we can use the in_top_k Op.
  # It returns a bool tensor with shape [batch_size] that is true for
  # the examples where the label is in the top k (here k=1)
  # of all logits for that example.
  correct = tf.nn.in_top_k(logits, labels, 1)
  # Return the number of true entries.
  return tf.reduce_sum(tf.cast(correct, tf.int32))


fully_connected_feed.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Trains and Evaluates the MNIST network using a feed dictionary."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=missing-docstring
import argparse
import os
import sys
import time

from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

from tensorflow.examples.tutorials.mnist import input_data
import mnist

# Basic model parameters as external flags.
# The flags module wraps the configurable hyperparameters.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('batch_size', 100, 'batch size.')
flags.DEFINE_integer('hidden1', 128, 'hidden1.')
flags.DEFINE_integer('hidden2', 32, 'hidden2.')
flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data for unit testing.')
flags.DEFINE_string('input_data_dir', 'MNIST_data/', 'Directory for storing data')
flags.DEFINE_float('learning_rate', 0.05, 'Initial learning rate.')
flags.DEFINE_string('log_dir', 'mnist_logs/', 'Summaries directory')
flags.DEFINE_integer('max_steps', 10000, 'Number of steps to run trainer.')


# Generate placeholders for the input data.
def placeholder_inputs(batch_size):
  """Generate placeholder variables to represent the input tensors.

  These placeholders are used as inputs by the rest of the model building
  code and will be fed from the downloaded data in the .run() loop, below.

  Args:
    batch_size: The batch size will be baked into both placeholders.

  Returns:
    images_placeholder: Images placeholder.
    labels_placeholder: Labels placeholder.
  """
  # Note that the shapes of the placeholders match the shapes of the full
  # image and label tensors, except the first dimension is now batch_size
  # rather than the full size of the train or test data sets.
  images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
                                                         mnist.IMAGE_PIXELS))
  labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
  return images_placeholder, labels_placeholder


# Build the feed dictionary for one step.
def fill_feed_dict(data_set, images_pl, labels_pl):
  """Fills the feed_dict for training the given step.

  A feed_dict takes the form of:
  feed_dict = {
      <placeholder>: <tensor of values to be passed for placeholder>,
      ....
  }

  Args:
    data_set: The set of images and labels, from input_data.read_data_sets()
    images_pl: The images placeholder, from placeholder_inputs().
    labels_pl: The labels placeholder, from placeholder_inputs().

  Returns:
    feed_dict: The feed dictionary mapping from placeholders to values.
  """
  # Create the feed_dict for the placeholders filled with the next
  # `batch size` examples.
  images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,
                                                 FLAGS.fake_data)
  feed_dict = {
      images_pl: images_feed,
      labels_pl: labels_feed,
  }
  return feed_dict


# Run one evaluation pass over a data set.
def do_eval(sess,
            eval_correct,
            images_placeholder,
            labels_placeholder,
            data_set):
  """Runs one evaluation against the full epoch of data.

  Args:
    sess: The session in which the model has been trained.
    eval_correct: The Tensor that returns the number of correct predictions.
    images_placeholder: The images placeholder.
    labels_placeholder: The labels placeholder.
    data_set: The set of images and labels to evaluate, from
      input_data.read_data_sets().
  """
  # And run one epoch of eval.
  true_count = 0  # Counts the number of correct predictions.
  # Steps per epoch = total number of examples // batch size.
  steps_per_epoch = data_set.num_examples // FLAGS.batch_size
  # Number of examples actually evaluated.
  num_examples = steps_per_epoch * FLAGS.batch_size
  for step in xrange(steps_per_epoch):
    # Feed the data one batch_size chunk at a time.
    feed_dict = fill_feed_dict(data_set,
                               images_placeholder,
                               labels_placeholder)
    # Accumulate the number of correct predictions.
    true_count += sess.run(eval_correct, feed_dict=feed_dict)
  precision = float(true_count) / num_examples
  print('  Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
        (num_examples, true_count, precision))


def run_training():
  """Train MNIST for a number of steps."""
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)

  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Generate placeholders for one batch of images and labels.
    images_placeholder, labels_placeholder = placeholder_inputs(
        FLAGS.batch_size)

    # Build a Graph that computes predictions from the inference model.
    # mnist.inference() builds the network that appears in TensorBoard's
    # GRAPHS tab.
    logits = mnist.inference(images_placeholder,
                             FLAGS.hidden1,
                             FLAGS.hidden2)

    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)

    # Add to the Graph the Ops that calculate and apply gradients.
    # mnist.training() also records the loss as a scalar summary.
    train_op = mnist.training(loss, FLAGS.learning_rate)

    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)

    # Build the summary Tensor based on the TF collection of Summaries.
    # Merges every registered summary into a single Op.
    summary = tf.summary.merge_all()

    # Add the variable initializer Op.
    init = tf.global_variables_initializer()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()

    # Create a session for running Ops on the Graph.
    sess = tf.Session()

    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

    # And then after everything is built:

    # Run the Op to initialize the variables.
    sess.run(init)

    # Start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()

      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = fill_feed_dict(data_sets.train,
                                 images_placeholder,
                                 labels_placeholder)

      # Run one step of the model.  The return values are the activations
      # from the `train_op` (which is discarded) and the `loss` Op.  To
      # inspect the values of your Ops or variables, you may include them
      # in the list passed to sess.run() and the value tensors will be
      # returned in the tuple from the call.
      _, loss_value = sess.run([train_op, loss],
                               feed_dict=feed_dict)

      duration = time.time() - start_time

      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()

      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
        saver.save(sess, checkpoint_file, global_step=step)
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.test)


def main(_):
  if tf.gfile.Exists(FLAGS.log_dir):
    tf.gfile.DeleteRecursively(FLAGS.log_dir)
  tf.gfile.MakeDirs(FLAGS.log_dir)
  run_training()


if __name__ == '__main__':
  tf.app.run(main=main)
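Stripped of the model details, the TensorBoard plumbing in the two files above comes down to four calls: tf.summary.scalar() (inside mnist.training()), tf.summary.merge_all(), tf.summary.FileWriter(FLAGS.log_dir, sess.graph), and summary_writer.add_summary(). A minimal self-contained sketch of that workflow, assuming TF 1.x and a hypothetical toy graph in place of the MNIST model:

import tensorflow as tf

# Toy stand-in for the MNIST graph: minimize (x - 1)^2 by gradient descent.
x = tf.Variable(3.0, name='x')
loss = tf.square(x - 1.0, name='toy_loss')
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

tf.summary.scalar('loss', loss)   # 1. declare what to log
summary = tf.summary.merge_all()  # 2. merge all summaries into one Op

with tf.Session() as sess:
  # 3. the FileWriter also records the graph for the GRAPHS tab
  writer = tf.summary.FileWriter('toy_logs', sess.graph)
  sess.run(tf.global_variables_initializer())
  for step in range(100):
    _, summary_str = sess.run([train_op, summary])
    writer.add_summary(summary_str, step)  # 4. append one event per step
  writer.close()

Pointing tensorboard --logdir=toy_logs at the result should show a single loss curve under SCALARS and the toy graph under GRAPHS, mirroring what the full script produces in mnist_logs.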
Output: the script prints the loss every 100 steps, followed by precision figures for the training, validation, and test evaluations.
2. Open a command prompt, change to the directory one level above mnist_logs, and run: tensorboard --logdir=mnist_logs
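If TensorBoard starts but shows no data, a quick sanity check is to confirm that the FileWriter actually produced event files; tf.summary.FileWriter stores them as events.out.tfevents.* files inside the log directory:

import glob
# Event files written by the FileWriter in run_training(); an empty list
# means no summaries were flushed to FLAGS.log_dir.
print(glob.glob('mnist_logs/events.out.tfevents.*'))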



3. Open the URL that TensorBoard prints (by default http://localhost:6006) in a browser.