A multilayer perceptron example on MNIST


Having previously implemented unidirectional and bidirectional LSTM models, here is a perceptron model with two hidden layers, implemented in TensorFlow:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# created by fhqplzj on 2017/06/20 下午2:56
from __future__ import print_function

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load MNIST with one-hot labels (the path below is the author's local data directory).
mnist = input_data.read_data_sets('/Users/fhqplzj/github/TensorFlow-Examples/examples/3_NeuralNetworks/data',
                                  one_hot=True)

# Hyperparameters.
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1

# Network architecture: 784 input pixels -> 256 -> 256 -> 10 classes.
n_hidden_1 = 256
n_hidden_2 = 256
n_input = 784   # 28 * 28 pixels per image
n_classes = 10  # digits 0-9

x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])


def multilayer_perceptron(x, weights, biases):
    # Two fully connected hidden layers with ReLU activations.
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # The output layer returns raw logits; softmax is applied inside the loss.
    output_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return output_layer


weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

pred = multilayer_perceptron(x, weights, biases)
# Softmax cross-entropy between logits and one-hot labels, averaged over the batch.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        avg_cost = 0.0
        total_batch = mnist.train.num_examples // batch_size
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # One optimization step; also fetch the batch loss.
            _, c = sess.run([optimizer, cost], feed_dict={
                x: batch_x,
                y: batch_y
            })
            avg_cost += c / total_batch
        if epoch % display_step == 0:
            print('epoch:{:04d}\tcost:{:.9f}'.format(epoch + 1, avg_cost))
    # Evaluate on the test set: a prediction is correct when the argmax of
    # the logits matches the argmax of the one-hot label.
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print('accuracy={:.9f}'.format(accuracy.eval({
        x: mnist.test.images,
        y: mnist.test.labels
    })))
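
The script above targets TensorFlow 1.x, and the tensorflow.examples.tutorials.mnist module has since been removed. As a minimal sketch (not part of the original post, assuming TensorFlow 2.x with tf.keras), roughly the same two-hidden-layer MLP can be written as follows, keeping the layer sizes, Adam with learning rate 0.001, 15 epochs, and batch size 100:

import tensorflow as tf

# Load MNIST via tf.keras; flatten 28x28 images into 784-dim vectors in [0, 1].
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype('float32') / 255.0
x_test = x_test.reshape(-1, 784).astype('float32') / 255.0

# Same architecture as above: 784 -> 256 (ReLU) -> 256 (ReLU) -> 10 logits.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(256, activation='relu', input_shape=(784,)),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(10)  # raw logits; softmax is applied inside the loss
])

model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
    # from_logits=True applies softmax inside the loss, matching
    # softmax_cross_entropy_with_logits in the TF 1.x script; the labels
    # here are integer class ids rather than one-hot vectors.
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])

model.fit(x_train, y_train, batch_size=100, epochs=15)
print(model.evaluate(x_test, y_test))

One behavioral difference: tf.keras.layers.Dense defaults to Glorot-uniform weight initialization, which typically trains more stably than the unit-variance tf.random_normal initialization used in the original script.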

