TensorFlow summary notes


Data definition:

import numpy as np

# generate 100 random numbers of type float32, with a linear target
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# alternative: quadratic data with noise, shaped (300, 1)
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

Graph construction:
The typical TF skeleton

import tensorflow as tf

Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))

y = Weights * x_data + biases
loss = tf.reduce_mean(tf.square(y - y_data))

optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()
# building variables in TF
Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))

Weights = tf.Variable(tf.random_normal([in_size, out_size]))
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)

W = tf.Variable([[1, 2, 3], [3, 4, 5]], dtype=tf.float32, name='weights')  # constant initial value, with explicit dtype and name
state = tf.Variable(0, name='counter')  # a named variable

# building constants in TF
matrix1 = tf.constant([[3, 3]])
matrix2 = tf.constant([[2],
                       [2]])
one = tf.constant(1)

TF operations

product = tf.matmul(matrix1, matrix2)            # matrix product
Wx_plus_b = tf.matmul(inputs, Weights) + biases  # also a matrix multiplication
new_value = tf.add(state, one)                   # add two values
update = tf.assign(state, new_value)             # assign new_value -> state
output = tf.multiply(input1, input2)             # element-wise multiplication

TF activation functions

tf.nn.relu
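As a minimal sketch (the placeholder shapes below are illustrative, not from the original post), an activation is simply applied on top of the linear output of a layer; tf.nn.sigmoid and tf.nn.tanh are used the same way as tf.nn.relu:

# minimal sketch: wrap a layer's linear output with an activation
x = tf.placeholder(tf.float32, [None, 3])        # illustrative shape
W = tf.Variable(tf.random_normal([3, 4]))
b = tf.Variable(tf.zeros([1, 4]) + 0.1)
hidden = tf.nn.relu(tf.matmul(x, W) + b)         # tf.nn.sigmoid / tf.nn.tanh work the same way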

Placeholder operations (placeholders: the session receives the input values at run time)

input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)

# a placeholder with an explicit shape
xs = tf.placeholder(tf.float32, [None, 1])

with tf.Session() as sess:
    print(sess.run(output, feed_dict={input1: [7.], input2: [2.]}))
    print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
    # whether inspecting an intermediate tensor or training, feed_dict must be supplied

TF optimizers

train = tf.train.GradientDescentOptimizer(0.5).minimize(loss)   # SGD
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)        # Adam

A reusable function for building a single NN layer in TF

def add_layer(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
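A minimal usage sketch (assumed, not in the original post): stacking two add_layer calls to fit the quadratic x_data/y_data defined above.

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)    # hidden layer
prediction = add_layer(l1, 10, 1, activation_function=None)  # linear output layer
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)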

A function for computing performance on the test set (because the result we want on the test set is not the training loss, an extra piece of graph has to be built for it).
First run to obtain an intermediate value (the predictions), then build further ops on top of that value and run again from start to finish (the idea of running in separate passes).

def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
    return result
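For context, a usage sketch (an assumption, not in the original post): with MNIST loaded through the TF 1.x tutorial helper, the function is called on the test split.

from tensorflow.examples.tutorials.mnist import input_data   # assumed data source
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
print(compute_accuracy(mnist.test.images, mnist.test.labels))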

Computing the cross entropy

cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                              reduction_indices=[1]))   # loss
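A side note: when prediction is produced by tf.nn.softmax, TF 1.x also provides a numerically more stable equivalent that takes the pre-softmax logits directly (the logits tensor below is an assumption, not defined in the original post):

# assumes `logits` is the layer output before the softmax
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=logits))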

A single layer with the dropout mechanism added

def add_layer(inputs, in_size, out_size, layer_name, activation_function=None):
    # add one more layer and return the output of this layer
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    # here to dropout
    Wx_plus_b = tf.nn.dropout(Wx_plus_b, keep_prob)
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    tf.summary.histogram(layer_name + '/outputs', outputs)
    return outputs

# during training, set keep_prob to 0.5 so half of the units are dropped
sess.run(train_step, feed_dict={xs: X_train, ys: y_train, keep_prob: 0.5})

# when evaluating or displaying results, use the full network: keep_prob = 1
train_result = sess.run(merged, feed_dict={xs: X_train, ys: y_train, keep_prob: 1})
test_result = sess.run(merged, feed_dict={xs: X_test, ys: y_test, keep_prob: 1})

Batch normalization (BN) code. The key part is the EMA (exponential moving average) update of the mean and variance, which behaves differently at train and test time.
Link to the full code.
The main part of the BN code follows, although parts of it are admittedly not fully understood.

# Batch Normalize
fc_mean, fc_var = tf.nn.moments(
    Wx_plus_b,
    axes=[0],   # the dimension you wanna normalize, here [0] for batch
                # for image, you wanna do [0, 1, 2] for [batch, height, width] but not channel
)
scale = tf.Variable(tf.ones([out_size]))
shift = tf.Variable(tf.zeros([out_size]))
epsilon = 0.001

# apply moving average for mean and var when train on batch
ema = tf.train.ExponentialMovingAverage(decay=0.5)

def mean_var_with_update():
    ema_apply_op = ema.apply([fc_mean, fc_var])
    with tf.control_dependencies([ema_apply_op]):
        return tf.identity(fc_mean), tf.identity(fc_var)

mean, var = mean_var_with_update()
Wx_plus_b = tf.nn.batch_normalization(Wx_plus_b, mean, var, shift, scale, epsilon)
# similar to these two steps:
# Wx_plus_b = (Wx_plus_b - fc_mean) / tf.sqrt(fc_var + 0.001)
# Wx_plus_b = Wx_plus_b * scale + shift
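A minimal sketch of the train/test switch mentioned above (the on_train flag is an assumption, not part of the quoted code): at test time the EMA shadow values are read back instead of recomputing batch statistics.

# sketch: choose batch statistics (train) or their moving averages (test)
on_train = tf.placeholder(tf.bool)                  # hypothetical flag
mean, var = tf.cond(on_train,
                    mean_var_with_update,           # update EMA, use batch stats
                    lambda: (ema.average(fc_mean), ema.average(fc_var)))
Wx_plus_b = tf.nn.batch_normalization(Wx_plus_b, mean, var, shift, scale, epsilon)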

Session part:

# the typical session structure
sess = tf.Session()
sess.run(init)
for step in range(201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(Weights))

# fetch several ops/tensors in a single run call
_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
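A minimal end-to-end sketch (assuming the xs/ys placeholders, loss and train_step built in the layer section above) of a training loop that feeds data through feed_dict:

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for step in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if step % 50 == 0:
            print(step, sess.run(loss, feed_dict={xs: x_data, ys: y_data}))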