TensorFlow examples

MNIST is to machine learning what "hello world" is to programming languages.


This post is mainly a collection of code samples; it does not cover the underlying theory.

The main contents are as follows:

1) Linear regression

2) Logistic regression

3) Artificial neural network

4) CNN

5) Bidirectional LSTM

6) Saving and loading models

7) Others

  • word2vec

https://github.com/fangpin/daily_programs/blob/master/python/tensorflow/word2vec_basic.py

  • chatbot (seq2seq)

https://github.com/fangpin/daily_programs/tree/master/python/tensorflow/chatbot

  • time series prediction (bidirectional LSTM)

https://github.com/fangpin/daily_programs/tree/master/python/tensorflow/time-series-predection

Disclaimer

The code in this post is inevitably naive; it is for reference only.

Linear regression

import numpy as np
import tensorflow as tf

# Synthetic data: y = 2*x1 + 3*x2 + 0.8
x_data = np.float32(np.random.rand(2, 100))
y_data = np.dot([2.0, 3.0], x_data) + 0.8

# Model parameters: a 1x2 weight row vector and a scalar bias
b = tf.Variable(tf.zeros([1]))
w = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
y = tf.matmul(w, x_data) + b

# Mean squared error, minimized with plain gradient descent
loss = tf.reduce_mean(tf.square(y - y_data))
train = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for step in range(1001):
    sess.run(train)
    if step % 100 == 0:
        print step, sess.run(w), sess.run(b)
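Since y_data is generated as 2*x1 + 3*x2 + 0.8, the printed values of w and b should converge toward [2.0, 3.0] and 0.8 as training proceeds.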

Logistic regression

from __future__ import absolute_import
from __future__ import division

# Import data
import input_data
import tensorflow as tf

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', '/tmp/data/', 'Directory for storing data')
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

x_data = tf.placeholder(tf.float32, [None, 784])
w = tf.Variable(tf.random_uniform([784, 10], -1.0, 1.0), name='weights')
b = tf.Variable(tf.random_uniform([10], -1.0, 1.0), name='biases')
init = tf.initialize_all_variables()

# Softmax regression: a single linear layer followed by softmax
y = tf.nn.softmax(tf.matmul(x_data, w) + b)
y_ = tf.placeholder(tf.float32, [None, 10])

cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(1).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
p = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# TensorBoard summaries (TF 0.x API)
tf.scalar_summary('cross_entropy', cross_entropy)
tf.scalar_summary('accuracy', p)
merge_summary = tf.merge_all_summaries()

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    summary_writer = tf.train.SummaryWriter('/tmp/log', sess.graph)
    for i in range(1000):
        batch_x, batch_y = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x_data: batch_x, y_: batch_y})
        summary = sess.run(merge_summary, feed_dict={x_data: batch_x, y_: batch_y})
        summary_writer.add_summary(summary, i)
    print sess.run(p, feed_dict={x_data: mnist.test.images, y_: mnist.test.labels})
    print 'model saved at ' + saver.save(sess, "model.ckpt")
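Since the script writes its summaries to /tmp/log, the loss and accuracy curves can be viewed in TensorBoard, for example:

tensorboard --logdir=/tmp/log

then open the address it prints in a browser.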

Artificial neural network

from __future__ import absolute_import
from __future__ import division

# Import data
import input_data
import tensorflow as tf

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', '/tmp/data/', 'Directory for storing data')
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

# Hand-rolled sigmoid; tf.sigmoid would do the same thing
def sigmoid(x):
    return 1.0 / (1 + tf.exp(-x))

x_data = tf.placeholder(tf.float32, [None, 784])
# One hidden layer with 100 units: 784 -> 100 -> 10
w1 = tf.Variable(tf.random_uniform([784, 100], -1.0, 1.0))
w2 = tf.Variable(tf.random_uniform([100, 10], -1.0, 1.0))
b1 = tf.Variable(tf.random_uniform([100], -1.0, 1.0))
b2 = tf.Variable(tf.random_uniform([10], -1.0, 1.0))

y = tf.nn.softmax(tf.matmul(sigmoid(tf.matmul(x_data, w1) + b1), w2) + b2)
y_ = tf.placeholder(tf.float32, [None, 10])

cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(1).minimize(cross_entropy)

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for _ in range(10000):
    batch_x, batch_y = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x_data: batch_x, y_: batch_y})

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
p = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print sess.run(p, feed_dict={x_data: mnist.test.images, y_: mnist.test.labels})
sess.close()

CNN

import tensorflow as tf
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Parameters
learning_rate = 0.001
batch_size = 256
steps = 2000

# Network Parameters
n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)

# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])

# Create some wrappers for simplicity
def conv2d(x, w, b, strides=[1, 1], padding="SAME"):
    return tf.nn.relu(tf.nn.conv2d(x, w, strides=[1, strides[0], strides[0], 1], padding=padding) + b)

def max_pool(x, ksize=[2, 2], strides=[2, 2], padding="VALID"):
    return tf.nn.max_pool(x, ksize=[1, ksize[0], ksize[1], 1], strides=[1, strides[0], strides[1], 1], padding=padding)

def avg_pool(x, ksize=[2, 2], strides=[2, 2], padding="VALID"):
    return tf.nn.avg_pool(x, ksize=[1, ksize[0], ksize[1], 1], strides=[1, strides[0], strides[1], 1], padding=padding)

def conv_net(x, weights, biases):
    # Reshape input picture to NHWC: [batch, 28, 28, 1]
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    conv1 = conv2d(x, weights['c1'], biases['c1'])
    conv1 = max_pool(conv1)  # 28x28 -> 14x14
    conv2 = conv2d(conv1, weights['c2'], biases['c2'])
    conv2 = avg_pool(conv2)  # 14x14 -> 7x7
    # Two 2x2 poolings shrink 28x28 to 7x7, with 32 channels from conv2
    fc1 = tf.reshape(conv2, [-1, 7 * 7 * 32])
    fc1 = tf.nn.relu(tf.matmul(fc1, weights['fc1']) + biases['fc1'])
    fc2 = tf.matmul(fc1, weights['fc2']) + biases['fc2']
    return fc2

# Store layers weight & bias
w = {
    'c1': tf.Variable(tf.random_normal([5, 5, 1, 16])),
    'c2': tf.Variable(tf.random_normal([5, 5, 16, 32])),
    'fc1': tf.Variable(tf.random_normal([7 * 7 * 32, 256])),
    'fc2': tf.Variable(tf.random_normal([256, 10])),
}
b = {
    'c1': tf.Variable(tf.zeros([16])),
    'c2': tf.Variable(tf.zeros([32])),
    'fc1': tf.Variable(tf.zeros([256])),
    'fc2': tf.Variable(tf.zeros([10])),
}

pred = conv_net(x, w, b)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    for step in range(steps):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        _, loss, acc = sess.run([optimizer, cost, accuracy], feed_dict={x: batch_x, y: batch_y})
        if step % 10 == 0 and step > 0:
            print 'loss', loss, 'acc', acc, 'at step', step
    print "Optimization Finished!"
    # Calculate accuracy on the MNIST test set
    print "Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})

Bidirectional LSTM

import tensorflow as tf
import numpy as np
import input_data
# bidirectional_rnn lives here in older TF versions; the try/except below
# also covers versions where it is exposed as tf.nn.bidirectional_rnn
from tensorflow.python.ops import rnn

mnist = input_data.read_data_sets('/tmp/data/', one_hot=True)

learning_rate = 0.01
# Each 28x28 image is treated as a sequence of 28 steps with 28 features each
n_steps = 28
n_input = 28
batch_size = 256
n_hidden = 128
n_class = 10

# Forward and backward outputs are concatenated, hence 2*n_hidden
weights = {
    'outputs': tf.Variable(tf.random_uniform([2 * n_hidden, n_class]))
}
biases = {
    'outputs': tf.Variable(tf.random_uniform([n_class]))
}

x_data = tf.placeholder('float', [None, n_steps, n_input])
y = tf.placeholder('float', [None, n_class])

def birnn(x_data, weights, biases):
    # Convert [batch, steps, input] into a length-n_steps list of [batch, input]
    x_data = tf.transpose(x_data, [1, 0, 2])
    x_data = tf.reshape(x_data, [-1, n_input])
    x_data = tf.split(0, n_steps, x_data)  # old tf.split signature: (axis, num, value)
    lstm_fw = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    lstm_bw = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    try:
        outputs, _, _ = rnn.bidirectional_rnn(lstm_fw, lstm_bw, x_data, dtype=tf.float32)
    except Exception:  # older versions return only the outputs
        outputs = tf.nn.bidirectional_rnn(lstm_fw, lstm_bw, x_data, dtype=tf.float32)
    # Classify on the output of the last time step
    return tf.matmul(outputs[-1], weights['outputs']) + biases['outputs']

pred = birnn(x_data, weights, biases)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
train = tf.train.AdamOptimizer(learning_rate).minimize(loss)

correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    for i in range(501):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        batch_x = batch_x.reshape([batch_size, n_steps, n_input])
        sess.run(train, feed_dict={x_data: batch_x, y: batch_y})
        if i % 10 == 0 and i > 0:
            acc, cost = sess.run([accuracy, loss], feed_dict={x_data: batch_x, y: batch_y})
            print 'at step', i, 'cost', cost, 'accuracy', acc
    print 'done'
    test_data = mnist.test.images.reshape([-1, n_steps, n_input])
    y_ = mnist.test.labels
    print 'final test accuracy %f' % (sess.run(accuracy, feed_dict={x_data: test_data, y: y_}),)

Saving and loading models

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

import tensorflow as tf

# Parameters
learning_rate = 0.001
batch_size = 100
display_step = 1
model_path = "model.ckpt"

# Network Parameters
n_hidden_1 = 256  # 1st layer number of features
n_hidden_2 = 256  # 2nd layer number of features
n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)

# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])

# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output layer with linear activation
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer

# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Construct model
pred = multilayer_perceptron(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Initializing the variables
init = tf.initialize_all_variables()

# 'Saver' op to save and restore all the variables
saver = tf.train.Saver()

# Running first session
print "Starting 1st session..."
with tf.Session() as sess:
    # Initialize variables
    sess.run(init)
    # Training cycle
    for epoch in range(3):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print "Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost)
    print "First Optimization Finished!"
    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
    # Save model weights to disk
    save_path = saver.save(sess, model_path)
    print "Model saved in file: %s" % save_path
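The block above only covers the saving half. As a minimal sketch of loading, reusing the graph, init, saver, model_path, and accuracy defined above, a second session can restore the checkpoint with saver.restore:

# Running a second session that restores the weights saved above
print "Starting 2nd session..."
with tf.Session() as sess:
    sess.run(init)
    # Restore model weights from the previously saved checkpoint
    saver.restore(sess, model_path)
    print "Model restored from file: %s" % model_path
    # Evaluation (or further training) resumes from the restored weights
    print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})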