TensorFlow Introductory Examples (1)


BasicModels

K-Means

from __future__ import print_function

import numpy as np
import tensorflow as tf
from tensorflow.contrib.factorization import KMeans
from tensorflow.examples.tutorials.mnist import input_data

# Import MNIST data (one-hot labels are used later to label the clusters)
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
full_data_x = mnist.train.images

# Parameters
num_steps = 50      # total training steps
batch_size = 1024   # samples per batch
k = 25              # number of clusters
num_classes = 10    # the 10 digits
num_features = 784  # each image is 28x28 pixels

# Input images and labels (labels are only used to evaluate the clustering)
X = tf.placeholder(tf.float32, shape=[None, num_features])
Y = tf.placeholder(tf.float32, shape=[None, num_classes])

# K-Means parameters
kmeans = KMeans(inputs=X, num_clusters=k, distance_metric='cosine',
                use_mini_batch=True)

# Build the K-Means graph
# (note: some TF 1.x releases return an extra cluster_centers_var element here)
(all_scores, cluster_idx, scores, cluster_centers_initialized, init_op,
 train_op) = kmeans.training_graph()
cluster_idx = cluster_idx[0]  # fix for cluster_idx being a tuple
avg_distance = tf.reduce_mean(scores)

# Initialize the variables and the cluster centers
init_vars = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init_vars, feed_dict={X: full_data_x})
sess.run(init_op, feed_dict={X: full_data_x})

# Training
for i in range(1, num_steps + 1):
    _, d, idx = sess.run([train_op, avg_distance, cluster_idx],
                         feed_dict={X: full_data_x})
    if i % 10 == 0 or i == 1:
        print("Step %i, Avg Distance: %f" % (i, d))

# Assign a label to each centroid: accumulate the one-hot labels of the
# samples mapped to each centroid, then take the most frequent label
counts = np.zeros(shape=(k, num_classes))
for i in range(len(idx)):
    counts[idx[i]] += mnist.train.labels[i]
labels_map = [np.argmax(c) for c in counts]
labels_map = tf.convert_to_tensor(labels_map)

# Evaluation ops: look up each sample's centroid label and compare to truth
cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx)
correct_prediction = tf.equal(cluster_label, tf.cast(tf.argmax(Y, 1), tf.int32))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Test model
test_x, test_y = mnist.test.images, mnist.test.labels
print("Test Accuracy:", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y}))
# Test Accuracy: 0.7127
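The cluster-to-label voting above walks the training set in a Python loop. As a side note, the same map can be built in vectorized NumPy; this is a minimal sketch, and majority_label_per_cluster is a helper name invented here, not part of the example.

import numpy as np

# Vectorized form of the counting loop above: idx holds each training
# sample's centroid index, labels is the one-hot label matrix (as in the
# example above); majority_label_per_cluster is a hypothetical helper name.
def majority_label_per_cluster(idx, labels, k):
    counts = np.zeros((k, labels.shape[1]))
    np.add.at(counts, idx, labels)   # accumulate one-hot votes per centroid
    return counts.argmax(axis=1)     # most frequent digit for each centroid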

linear_regression

from __future__ import print_function

import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng = numpy.random

# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50

# Training data
train_X = numpy.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59,
                         2.167, 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
train_Y = numpy.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596,
                         2.53, 1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
n_samples = train_X.shape[0]

# Graph inputs
X = tf.placeholder("float")
Y = tf.placeholder("float")

# Model weights, randomly initialized
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")

# Linear model: pred = W * X + b
pred = tf.add(tf.multiply(X, W), b)

# Mean squared error
cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)

# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)

    # Fit the training data, one sample at a time (stochastic gradient descent)
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        if (epoch + 1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(c),
                  "W=", sess.run(W), "b=", sess.run(b))

    print("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

    # Graphic display
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()

    # Test the model against unseen examples
    test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])
    test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])

    print("Testing... (Mean square loss Comparison)")
    testing_cost = sess.run(
        tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]),
        feed_dict={X: test_X, Y: test_Y})
    print("Testing cost=", testing_cost)
    print("Absolute mean square loss difference:", abs(training_cost - testing_cost))

    plt.plot(test_X, test_Y, 'bo', label='Testing data')
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()
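Univariate least squares has a closed-form optimum (slope = cov(x, y) / var(x)), so the gradient-descent result can be sanity-checked against it. A minimal NumPy sketch, assuming the same train_X/train_Y arrays; closed_form_fit is a name introduced here for illustration.

import numpy as np

# Closed-form simple linear regression. Gradient descent on the cost above
# should converge toward these values.
def closed_form_fit(x, y):
    w = np.cov(x, y, bias=True)[0, 1] / np.var(x)  # slope = cov(x, y) / var(x)
    b = y.mean() - w * x.mean()                    # intercept from the means
    return w, b

# Usage: closed_form_fit(train_X, train_Y) gives the reference (W, b)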

logistic_regression

from __future__ import print_function

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Import MNIST data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1

# Graph inputs
x = tf.placeholder(tf.float32, [None, 784])  # mnist data image of shape 28*28=784
y = tf.placeholder(tf.float32, [None, 10])   # 0-9 digits recognition => 10 classes

# Model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Softmax model
pred = tf.nn.softmax(tf.matmul(x, W) + b)

# Cross-entropy loss
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))

# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            _, c = sess.run([optimizer, cost],
                            feed_dict={x: batch_xs, y: batch_ys})
            # Average the loss over the epoch
            avg_cost += c / total_batch
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))

    print("Optimization Finished!")

    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
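Computing the softmax and the log separately, as above, can hit log(0) when a predicted probability underflows. TensorFlow 1.x ships a fused, numerically stable op for exactly this; the snippet below is a drop-in sketch assuming the same x, y, W, b as in the example.

# Numerically stable variant of the loss above: the fused op works on raw
# logits and avoids the explicit tf.log(pred)
logits = tf.matmul(x, W) + b
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
pred = tf.nn.softmax(logits)  # probabilities, still usable for tf.argmax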

nearest_neighbor

from __future__ import print_function

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Import MNIST data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Limit the data: 5000 reference samples, 200 test samples
Xtr, Ytr = mnist.train.next_batch(5000)
Xte, Yte = mnist.test.next_batch(200)

# Graph inputs
xtr = tf.placeholder("float", [None, 784])
xte = tf.placeholder("float", [784])

# L1 distance between the test image and every reference image
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))),
                         reduction_indices=1)
# Prediction: index of the nearest neighbor
# (tf.argmin replaces the deprecated tf.arg_min)
pred = tf.argmin(distance, 0)

accuracy = 0.

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)

    # Loop over the test data
    for i in range(len(Xte)):
        # Get the nearest neighbor of the i-th test image
        nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})
        print("Test", i, "Prediction:", np.argmax(Ytr[nn_index]),
              "True Class:", np.argmax(Yte[i]))
        # The prediction is the label of the nearest neighbor
        if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
            accuracy += 1. / len(Xte)

    print("Done!")
    print("Accuracy:", accuracy)
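The example classifies with a single nearest neighbor. A common extension is k-NN with a majority vote over the k closest references; a sketch assuming the distance tensor and data arrays from above (k and knn_indices are names introduced here).

k = 5
# tf.nn.top_k on the negated distances selects the k smallest distances
_, knn_indices = tf.nn.top_k(tf.negative(distance), k=k)

# Inside the test loop, replacing the 1-NN lookup:
#   idxs = sess.run(knn_indices, feed_dict={xtr: Xtr, xte: Xte[i, :]})
#   votes = np.argmax(Ytr[idxs], axis=1)      # labels of the k neighbors
#   prediction = np.bincount(votes).argmax()  # majority vote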

random_forest

from __future__ import print_function

import tensorflow as tf
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.examples.tutorials.mnist import input_data

# Import MNIST data (integer labels, not one-hot)
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)

# Parameters
num_steps = 500     # total training steps
batch_size = 1024   # samples per batch
num_classes = 10    # the 10 digits
num_features = 784  # each image is 28x28 pixels
num_trees = 10
max_nodes = 1000

# Input and target data
X = tf.placeholder(tf.float32, shape=[None, num_features])
# For random forests, labels must be integer class ids
Y = tf.placeholder(tf.int32, shape=[None])

# Random forest parameters
hparams = tensor_forest.ForestHParams(num_classes=num_classes,
                                      num_features=num_features,
                                      num_trees=num_trees,
                                      max_nodes=max_nodes).fill()

# Build the random forest
forest_graph = tensor_forest.RandomForestGraphs(hparams)
# Training graph and loss
train_op = forest_graph.training_graph(X, Y)
loss_op = forest_graph.training_loss(X, Y)

# Measure accuracy
infer_op = forest_graph.inference_graph(X)
correct_prediction = tf.equal(tf.argmax(infer_op, 1), tf.cast(Y, tf.int64))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Initialize the variables
init_vars = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init_vars)

# Training
for i in range(1, num_steps + 1):
    batch_x, batch_y = mnist.train.next_batch(batch_size)
    _, l = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})
    if i % 50 == 0 or i == 1:
        acc = sess.run(accuracy_op, feed_dict={X: batch_x, Y: batch_y})
        print('Step %i, Loss: %f, Acc: %f' % (i, l, acc))

# Test model
test_x, test_y = mnist.test.images, mnist.test.labels
print("Test Accuracy:", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y}))
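Feeding all 10,000 test images in one sess.run works here but can be memory-hungry with larger models. A minimal batched-evaluation sketch that reproduces the same number, reusing sess, accuracy_op, X, Y, batch_size and the test arrays from above.

correct = 0.0
for start in range(0, len(test_x), batch_size):
    xb = test_x[start:start + batch_size]
    yb = test_y[start:start + batch_size]
    # Weight each batch's accuracy by its size so the average stays exact
    # even when the last batch is smaller
    correct += sess.run(accuracy_op, feed_dict={X: xb, Y: yb}) * len(xb)
print("Batched Test Accuracy:", correct / len(test_x))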