A Brief TensorFlow Tutorial Series (4): Implementing Softmax Regression in TensorFlow


In this installment we build a Softmax regression model for the classic MNIST image-recognition dataset, as an introduction to deep learning. These are mainly notes I took while learning TensorFlow; the official tutorial covers the same ground. This series adds my own notes on the points I found unclear while studying, which I hope will be helpful to others.



Now let's implement Softmax regression. The dataset lives at http://yann.lecun.com/exdb/mnist/, and you can download and process it yourself if you prefer; here we load it with input_data (the input_data code is at the end of this post) and build the model. See the comments in the code for details; the final accuracy is about 93%.
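For reference, here are the two formulas the code below implements (standard definitions, spelled out here rather than in the original notes): the model predicts a softmax over a linear map, and training minimizes the cross-entropy between the prediction y and the true one-hot label y'.

\[ y = \mathrm{softmax}(xW + b), \qquad \mathrm{softmax}(z)_i = \frac{e^{z_i}}{\sum_j e^{z_j}} \]

\[ H_{y'}(y) = -\sum_i y'_i \log y_i \]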

# -*- coding: utf-8 -*-
"""
Created on Sat Apr  1 09:35:59 2017

@author: chenbin
"""

import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

import tensorflow as tf

x = tf.placeholder('float', [None, 784])
"""
x is not a specific value but a placeholder: we supply its value when we ask
TensorFlow to run a computation. We want to be able to feed in any number of
MNIST images, each flattened into a 784-dimensional vector, so we represent
them as a 2-D float tensor of shape [None, 784]. (None means the first
dimension can have any length.)
"""

W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
"""
tf.Variable takes an initial value. Here we initialize both W and b with
all-zero tensors; since W and b are going to be learned, their initial
values do not matter much.
"""

y = tf.nn.softmax(tf.matmul(x, W) + b)   # matmul: matrix multiplication
y_ = tf.placeholder("float", [None, 10]) # holds the true labels

cross_entropy = -tf.reduce_sum(y_ * tf.log(y))  # cross-entropy; tf.reduce_sum sums all elements
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
"""
We ask TensorFlow to minimize the cross-entropy using the gradient descent
algorithm with a learning rate of 0.01.
"""

init = tf.initialize_all_variables()  # deprecated in newer TF; use tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)  # launch the model in a Session and initialize the variables

# Train the model
for i in range(1000):
  batch_xs, batch_ys = mnist.train.next_batch(100)
  sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
"""
In each step of the loop we grab a batch of 100 training examples
(next_batch samples them at random) and feed them in place of the
placeholders to run train_step.
"""

# Evaluate the model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
"""
tf.argmax is a very useful function: it gives the index of the largest entry
of a tensor along a given axis. Since the label vectors are one-hot (all 0s
and a single 1), the index of the 1 is the class label. So tf.argmax(y, 1) is
the label the model predicts for each input x, and tf.argmax(y_, 1) is the
true label. tf.equal checks whether the prediction matches the true label
(the same index means a match).
"""

# Average the matches to get the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
"""
tf.cast converts x (or x.values) to the given dtype, e.g.:
  a = [1.8, 2.2]            # dtype=tf.float32
  tf.cast(a, tf.int32) ==> [1, 2]  # dtype=tf.int32
"""

print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
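One caveat worth adding (my note, not from the original tutorial): computing the cross-entropy as -tf.reduce_sum(y_ * tf.log(y)) on top of an explicit softmax is numerically fragile, because tf.log(y) blows up when a predicted probability reaches 0. Here is a minimal sketch of the more stable variant, assuming a TensorFlow 1.x API where tf.nn.softmax_cross_entropy_with_logits accepts labels= and logits= keywords:

import tensorflow as tf

x  = tf.placeholder('float', [None, 784])
y_ = tf.placeholder('float', [None, 10])
W  = tf.Variable(tf.zeros([784, 10]))
b  = tf.Variable(tf.zeros([10]))

logits = tf.matmul(x, W) + b  # raw, unnormalized scores; no softmax here
# The fused op applies softmax and cross-entropy in one numerically stable step.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
# reduce_mean averages over the batch (the code above sums), so the effective
# learning rate changes; 0.5 is a common choice with the mean.
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Apply softmax only where actual probabilities are needed, e.g. for evaluation:
y = tf.nn.softmax(logits)

The rest of the training and evaluation code stays the same; tf.argmax(logits, 1) and tf.argmax(y, 1) give identical predictions, since softmax is monotonic.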


The input_data code:

# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip
import os

import numpy
from six.moves import urllib
from six.moves import xrange  # pylint: disable=redefined-builtin

SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'


def maybe_download(filename, work_directory):
  """Download the data from Yann's website, unless it's already here."""
  if not os.path.exists(work_directory):
    os.mkdir(work_directory)
  filepath = os.path.join(work_directory, filename)
  if not os.path.exists(filepath):
    filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  return filepath


def _read32(bytestream):
  dt = numpy.dtype(numpy.uint32).newbyteorder('>')
  # Index with [0] so callers get a scalar rather than a length-1 array.
  return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


def extract_images(filename):
  """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    magic = _read32(bytestream)
    if magic != 2051:
      raise ValueError(
          'Invalid magic number %d in MNIST image file: %s' %
          (magic, filename))
    num_images = _read32(bytestream)
    rows = _read32(bytestream)
    cols = _read32(bytestream)
    buf = bytestream.read(rows * cols * num_images)
    data = numpy.frombuffer(buf, dtype=numpy.uint8)
    data = data.reshape(num_images, rows, cols, 1)
    return data


def dense_to_one_hot(labels_dense, num_classes=10):
  """Convert class labels from scalars to one-hot vectors."""
  num_labels = labels_dense.shape[0]
  index_offset = numpy.arange(num_labels) * num_classes
  labels_one_hot = numpy.zeros((num_labels, num_classes))
  labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
  return labels_one_hot


def extract_labels(filename, one_hot=False):
  """Extract the labels into a 1D uint8 numpy array [index]."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    magic = _read32(bytestream)
    if magic != 2049:
      raise ValueError(
          'Invalid magic number %d in MNIST label file: %s' %
          (magic, filename))
    num_items = _read32(bytestream)
    buf = bytestream.read(num_items)
    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
    if one_hot:
      return dense_to_one_hot(labels)
    return labels


class DataSet(object):

  def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0

  @property
  def images(self):
    return self._images

  @property
  def labels(self):
    return self._labels

  @property
  def num_examples(self):
    return self._num_examples

  @property
  def epochs_completed(self):
    return self._epochs_completed

  def next_batch(self, batch_size, fake_data=False):
    """Return the next `batch_size` examples from this data set."""
    if fake_data:
      fake_image = [1.0 for _ in xrange(784)]
      fake_label = 0
      return [fake_image for _ in xrange(batch_size)], [
          fake_label for _ in xrange(batch_size)]
    start = self._index_in_epoch
    self._index_in_epoch += batch_size
    if self._index_in_epoch > self._num_examples:
      # Finished epoch
      self._epochs_completed += 1
      # Shuffle the data
      perm = numpy.arange(self._num_examples)
      numpy.random.shuffle(perm)
      self._images = self._images[perm]
      self._labels = self._labels[perm]
      # Start next epoch
      start = 0
      self._index_in_epoch = batch_size
      assert batch_size <= self._num_examples
    end = self._index_in_epoch
    return self._images[start:end], self._labels[start:end]


def read_data_sets(train_dir, fake_data=False, one_hot=False):
  class DataSets(object):
    pass
  data_sets = DataSets()
  if fake_data:
    data_sets.train = DataSet([], [], fake_data=True)
    data_sets.validation = DataSet([], [], fake_data=True)
    data_sets.test = DataSet([], [], fake_data=True)
    return data_sets
  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
  VALIDATION_SIZE = 5000
  local_file = maybe_download(TRAIN_IMAGES, train_dir)
  train_images = extract_images(local_file)
  local_file = maybe_download(TRAIN_LABELS, train_dir)
  train_labels = extract_labels(local_file, one_hot=one_hot)
  local_file = maybe_download(TEST_IMAGES, train_dir)
  test_images = extract_images(local_file)
  local_file = maybe_download(TEST_LABELS, train_dir)
  test_labels = extract_labels(local_file, one_hot=one_hot)
  validation_images = train_images[:VALIDATION_SIZE]
  validation_labels = train_labels[:VALIDATION_SIZE]
  train_images = train_images[VALIDATION_SIZE:]
  train_labels = train_labels[VALIDATION_SIZE:]
  data_sets.train = DataSet(train_images, train_labels)
  data_sets.validation = DataSet(validation_images, validation_labels)
  data_sets.test = DataSet(test_images, test_labels)
  return data_sets
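For reference, here is a quick sanity check of the loader (a minimal sketch; the counts and shapes follow from the code above, which holds out 5000 training images as a validation set):

import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print(mnist.train.num_examples)       # 55000 (60000 minus the 5000 validation images)
print(mnist.validation.num_examples)  # 5000
print(mnist.test.num_examples)        # 10000

batch_xs, batch_ys = mnist.train.next_batch(100)
print(batch_xs.shape)  # (100, 784): flattened 28x28 images, scaled to [0.0, 1.0]
print(batch_ys.shape)  # (100, 10): one-hot labels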
