TensorFlow distributed data parallelism: asynchronous between-graph training, a self-written RNN example
```python
# For how to run this script, see the previous two articles.
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('job_name', '', 'One of "ps", "worker"')
tf.app.flags.DEFINE_string('ps_hosts', '',
                           """Comma-separated list of hostname:port for the """
                           """parameter server jobs. e.g. """
                           """'machine1:2222,machine2:1111,machine2:2222'""")
tf.app.flags.DEFINE_string('worker_hosts', '',
                           """Comma-separated list of hostname:port for the """
                           """worker jobs. e.g. """
                           """'machine1:2222,machine2:1111,machine2:2222'""")
tf.app.flags.DEFINE_integer('task_id', 0,
                            'Task id of the replica running the training.')

ps_hosts = FLAGS.ps_hosts.split(',')
worker_hosts = FLAGS.worker_hosts.split(',')
cluster_spec = tf.train.ClusterSpec({'ps': ps_hosts, 'worker': worker_hosts})
server = tf.train.Server(cluster_spec,
                         job_name=FLAGS.job_name,
                         task_index=FLAGS.task_id)

# Parameter servers only host the shared variables; they block here forever.
if FLAGS.job_name == 'ps':
    server.join()

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./", one_hot=True)

# Parameters
learning_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10

# Network Parameters
n_input = 28    # MNIST data input (img shape: 28*28)
n_steps = 28    # timesteps
n_hidden = 128  # hidden layer num of features
n_classes = 10  # MNIST total classes (0-9 digits)


def RNN(x, weights, biases):
    # Prepare data shape to match the static RNN's requirements.
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: list of 'n_steps' tensors of shape (batch_size, n_input)

    # Permute batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshape to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(x, n_steps, 0)

    # Define an LSTM cell
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get the LSTM cell output
    outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using the RNN inner loop's last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']


# replica_device_setter pins the variables to the ps job and the ops to this
# worker, so all workers update one shared set of parameters.
with tf.device(tf.train.replica_device_setter(
        worker_device="/job:worker/task:%d" % FLAGS.task_id,
        cluster=cluster_spec)):
    # tf Graph input
    x = tf.placeholder("float", [None, n_steps, n_input])
    y = tf.placeholder("float", [None, n_classes])

    # Define weights
    weights = {'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))}
    biases = {'out': tf.Variable(tf.random_normal([n_classes]))}

    pred = RNN(x, weights, biases)

    # Define loss and optimizer
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    # Evaluate model
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Initializing the variables
    global_step = tf.Variable(0, name='global_step', trainable=False)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    tf.summary.scalar('cost', cost)
    summary_op = tf.summary.merge_all()

# Worker 0 acts as the chief: it runs init_op and checkpoints the model
# every 60 seconds.
sv = tf.train.Supervisor(is_chief=(FLAGS.task_id == 0),
                         logdir="C:\\Users\\guotong1\\Desktop\\checkpoint",
                         init_op=init,
                         summary_op=None,
                         saver=saver,
                         global_step=global_step,
                         save_model_secs=60)

# Launch the graph. The Supervisor initializes the variables on the chief and
# makes the other workers wait for it, so no manual sess.run(init) is needed
# (re-running init on a non-chief worker would clobber the shared weights).
with sv.managed_session(server.target) as sess:
    step = 1
    # Keep training until we reach the max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape data to get 28 sequences of 28 elements
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
        # Run the optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            # Calculate batch accuracy and loss
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Iter " + str(step * batch_size) +
                  ", Minibatch Loss= " + "{:.6f}".format(loss) +
                  ", Training Accuracy= " + "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")

    # Calculate accuracy for 128 MNIST test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={x: test_data, y: test_label}))

sv.stop()
```
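The original post defers launch instructions to the previous two articles. As a rough sketch only, one parameter server and two workers could be started like this, assuming the script above is saved as `distributed_rnn.py` (a hypothetical filename) and all three processes run on one machine with placeholder ports:

```
python distributed_rnn.py --job_name=ps     --ps_hosts=localhost:2222 --worker_hosts=localhost:2223,localhost:2224 --task_id=0
python distributed_rnn.py --job_name=worker --ps_hosts=localhost:2222 --worker_hosts=localhost:2223,localhost:2224 --task_id=0
python distributed_rnn.py --job_name=worker --ps_hosts=localhost:2222 --worker_hosts=localhost:2223,localhost:2224 --task_id=1
```

Because this is between-graph asynchronous data parallelism, each worker process builds its own copy of the graph, reads the shared variables from the parameter server, and applies its gradient updates independently; no worker waits for the others, so steps from different workers interleave freely.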