TensorFlow classification, and using dropout to fix overfitting

First, a plain softmax classifier on MNIST with no regularization at all; it is the baseline that the dropout example below improves on.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# MNIST handwritten digits, 10 classes (0-9)
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

def add_layer(inputs, in_size, out_size, activation_function=None):
    # add one more layer and return the output of this layer
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
    return result

# define placeholders for inputs to the network
xs = tf.placeholder(tf.float32, [None, 784])  # 28x28
ys = tf.placeholder(tf.float32, [None, 10])

# add output layer
prediction = add_layer(xs, 784, 10, activation_function=tf.nn.softmax)

# the error between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                              reduction_indices=[1]))  # loss
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.Session()
# important step
init = tf.global_variables_initializer()
sess.run(init)

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
    if i % 50 == 0:
        print(compute_accuracy(mnist.test.images, mnist.test.labels))
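One caveat worth flagging: tf.log(prediction) returns NaN as soon as the softmax outputs an exact zero. A numerically safer variant, sketched here against the same TF 1.x graph (this snippet is not part of the original post), keeps the output layer linear and lets TensorFlow fuse the softmax into the loss op:

# Sketch: replaces the `prediction` / `cross_entropy` lines above.
logits = add_layer(xs, 784, 10, activation_function=None)  # raw scores, no softmax
prediction = tf.nn.softmax(logits)  # probabilities, still used by compute_accuracy
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)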
The second example trains on the much smaller sklearn digits set (8x8 images), where a 50-unit hidden layer overfits easily, and uses dropout plus TensorBoard summaries to make the effect visible. Note that train_test_split now lives in sklearn.model_selection; the sklearn.cross_validation module used in the original has since been removed.

import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer

# load data
digits = load_digits()
X = digits.data
y = digits.target
y = LabelBinarizer().fit_transform(y)  # one-hot encode the 10 digit classes
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3)

def add_layer(inputs, in_size, out_size, layer_name, activation_function=None):
    # add one more layer and return the output of this layer
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    # here to dropout (note: applied in every layer, including the output layer)
    Wx_plus_b = tf.nn.dropout(Wx_plus_b, keep_prob)
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    tf.summary.histogram(layer_name + '/outputs', outputs)
    return outputs

# define placeholders for inputs to the network
keep_prob = tf.placeholder(tf.float32)
xs = tf.placeholder(tf.float32, [None, 64])  # 8x8
ys = tf.placeholder(tf.float32, [None, 10])

# add hidden layer and output layer
l1 = add_layer(xs, 64, 50, 'l1', activation_function=tf.nn.tanh)
prediction = add_layer(l1, 50, 10, 'l2', activation_function=tf.nn.softmax)

# the loss between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                              reduction_indices=[1]))  # loss
tf.summary.scalar('loss', cross_entropy)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.Session()
merged = tf.summary.merge_all()
# summary writers go in here
train_writer = tf.summary.FileWriter("logs/train", sess.graph)
test_writer = tf.summary.FileWriter("logs/test", sess.graph)

# tf.initialize_all_variables() is no longer valid from 2017-03-02
# if using tensorflow >= 0.12
init = tf.global_variables_initializer()
sess.run(init)

for i in range(500):
    # here to determine the keeping probability
    sess.run(train_step, feed_dict={xs: X_train, ys: y_train, keep_prob: 0.5})
    if i % 50 == 0:
        # record loss with dropout disabled (keep_prob = 1)
        train_result = sess.run(merged, feed_dict={xs: X_train, ys: y_train, keep_prob: 1})
        test_result = sess.run(merged, feed_dict={xs: X_test, ys: y_test, keep_prob: 1})
        train_writer.add_summary(train_result, i)
        test_writer.add_summary(test_result, i)
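What tf.nn.dropout does under the hood is "inverted dropout": during training each activation is zeroed with probability 1 - keep_prob and the survivors are scaled up by 1 / keep_prob, so the expected activation is unchanged and evaluation with keep_prob = 1 needs no extra rescaling. A minimal NumPy sketch of that mechanism (illustration only; the function name is mine, not a TensorFlow API):

import numpy as np

def inverted_dropout(x, keep_prob):
    # Bernoulli mask: keep each unit with probability keep_prob,
    # then rescale the survivors so E[output] == input.
    mask = (np.random.rand(*x.shape) < keep_prob) / keep_prob
    return x * mask

h = np.ones((2, 4))
print(inverted_dropout(h, 0.5))  # roughly half the entries are 0, the rest are 2.0
print(inverted_dropout(h, 1.0))  # keep_prob = 1: output equals input

To see the regularizer at work, run tensorboard --logdir=logs and compare the two loss curves the FileWriters recorded: trained with keep_prob: 0.5 the test curve stays close to the training curve, whereas with keep_prob: 1 it typically drifts upward as the hidden layer memorizes the small training set.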
