TensorFlow Learning Diary 15
1. Basic Operations on multi-GPU
Explanation:
from __future__ import print_function

'''
Basic Multi GPU computation example using TensorFlow library.
'''

'''
This tutorial requires your machine to have 2 GPUs
"/cpu:0": The CPU of your machine.
"/gpu:0": The first GPU of your machine
"/gpu:1": The second GPU of your machine
'''

import numpy as np
import tensorflow as tf
import datetime

# Processing Units logs
log_device_placement = True

# Num of multiplications to perform
n = 10

'''
Example: compute A^n + B^n on 2 GPUs
Results on 8 cores with 2 GTX-980:
 * Single GPU computation time: 0:00:11.277449
 * Multi GPU computation time: 0:00:07.131701
'''
# Create random large matrix
A = np.random.rand(10000, 10000).astype('float32')
B = np.random.rand(10000, 10000).astype('float32')

# Create a graph to store results
c1 = []
c2 = []

def matpow(M, n):
    if n < 1:  # Abstract cases where n < 1
        return M
    else:
        return tf.matmul(M, matpow(M, n - 1))

'''
Single GPU computing
'''
with tf.device('/gpu:0'):
    a = tf.placeholder(tf.float32, [10000, 10000])
    b = tf.placeholder(tf.float32, [10000, 10000])
    # Compute A^n and B^n and store results in c1
    c1.append(matpow(a, n))
    c1.append(matpow(b, n))

with tf.device('/cpu:0'):
    sum = tf.add_n(c1)  # Addition of all elements in c1, i.e. A^n + B^n

t1_1 = datetime.datetime.now()
with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
    # Run the op.
    sess.run(sum, {a: A, b: B})
t2_1 = datetime.datetime.now()

'''
Multi GPU computing
'''
# GPU:0 computes A^n
with tf.device('/gpu:0'):
    # Compute A^n and store result in c2
    a = tf.placeholder(tf.float32, [10000, 10000])
    c2.append(matpow(a, n))

# GPU:1 computes B^n
with tf.device('/gpu:1'):
    # Compute B^n and store result in c2
    b = tf.placeholder(tf.float32, [10000, 10000])
    c2.append(matpow(b, n))

with tf.device('/cpu:0'):
    sum = tf.add_n(c2)  # Addition of all elements in c2, i.e. A^n + B^n

t1_2 = datetime.datetime.now()
with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:
    # Run the op.
    sess.run(sum, {a: A, b: B})
t2_2 = datetime.datetime.now()

print("Single GPU computation time: " + str(t2_1 - t1_1))
print("Multi GPU computation time: " + str(t2_2 - t1_2))
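The script above hard-codes '/gpu:0' and '/gpu:1', so it will fail on a machine with fewer than two GPUs. Below is a minimal sketch, using the same TF1 graph-mode API as the example (the helper name available_gpus is ours, not part of the original): it lists the devices that are actually visible and builds a session config with allow_soft_placement so TensorFlow can fall back to an existing device instead of raising an error.

# Sketch: inspect visible devices and build a tolerant session config (TF1).
import tensorflow as tf
from tensorflow.python.client import device_lib

def available_gpus():
    # Device names look like '/device:GPU:0', '/device:GPU:1', ...
    return [d.name for d in device_lib.list_local_devices()
            if d.device_type == 'GPU']

print("Visible GPUs:", available_gpus())

config = tf.ConfigProto(
    allow_soft_placement=True,   # fall back if a requested device is missing
    log_device_placement=True)   # log where each op actually runs
config.gpu_options.allow_growth = True  # allocate GPU memory on demand

# Pass this config to the sessions in the example above:
# with tf.Session(config=config) as sess: ...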
2. Train a Neural Network on multi-GPU
Explanation:
''' Multi-GPU Training Example.

Train a convolutional neural network on multiple GPUs with TensorFlow.
'''

from __future__ import division, print_function, absolute_import

import numpy as np
import tensorflow as tf
import time

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("data/", one_hot=True)

# Training Parameters
num_gpus = 2
num_steps = 200
learning_rate = 0.001
batch_size = 1024
display_step = 10

# Network Parameters
num_input = 784  # MNIST data input (img shape: 28*28)
num_classes = 10  # MNIST total classes (0-9 digits)
dropout = 0.75  # Dropout, probability to keep units


# Build a convolutional neural network
def conv_net(x, n_classes, dropout, reuse, is_training):
    # Define a scope for reusing the variables
    with tf.variable_scope('ConvNet', reuse=reuse):
        # MNIST data input is a 1-D vector of 784 features (28*28 pixels)
        # Reshape to match picture format [Height x Width x Channel]
        # Tensor input become 4-D: [Batch Size, Height, Width, Channel]
        x = tf.reshape(x, shape=[-1, 28, 28, 1])

        # Convolution Layer with 64 filters and a kernel size of 5
        x = tf.layers.conv2d(x, 64, 5, activation=tf.nn.relu)
        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
        x = tf.layers.max_pooling2d(x, 2, 2)

        # Convolution Layer with 256 filters and a kernel size of 3
        x = tf.layers.conv2d(x, 256, 3, activation=tf.nn.relu)
        # Convolution Layer with 512 filters and a kernel size of 3
        x = tf.layers.conv2d(x, 512, 3, activation=tf.nn.relu)
        # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
        x = tf.layers.max_pooling2d(x, 2, 2)

        # Flatten the data to a 1-D vector for the fully connected layer
        x = tf.contrib.layers.flatten(x)

        # Fully connected layer
        x = tf.layers.dense(x, 2048)
        # Apply Dropout (if is_training is False, dropout is not applied)
        x = tf.layers.dropout(x, rate=dropout, training=is_training)

        # Fully connected layer
        x = tf.layers.dense(x, 1024)
        # Apply Dropout (if is_training is False, dropout is not applied)
        x = tf.layers.dropout(x, rate=dropout, training=is_training)

        # Output layer, class prediction
        out = tf.layers.dense(x, n_classes)
        # Because 'softmax_cross_entropy_with_logits' already applies softmax,
        # we only apply softmax to the testing network
        out = tf.nn.softmax(out) if not is_training else out
    return out


def average_gradients(tower_grads):
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, _ in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)
            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)

        # Average over the 'tower' dimension.
        grad = tf.concat(grads, 0)
        grad = tf.reduce_mean(grad, 0)

        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So we will just return the first tower's pointer to
        # the Variable.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads


# Place all ops on CPU by default
with tf.device('/cpu:0'):
    tower_grads = []
    reuse_vars = False

    # tf Graph input
    X = tf.placeholder(tf.float32, [None, num_input])
    Y = tf.placeholder(tf.float32, [None, num_classes])

    # Loop over all GPUs and construct their own computation graph
    for i in range(num_gpus):
        with tf.device('/gpu:%d' % i):

            # Split data between GPUs
            _x = X[i * batch_size: (i + 1) * batch_size]
            _y = Y[i * batch_size: (i + 1) * batch_size]

            # Because Dropout has different behavior at training and prediction
            # time, we need to create 2 distinct computation graphs that share
            # the same weights.

            # Create a graph for training
            logits_train = conv_net(_x, num_classes, dropout,
                                    reuse=reuse_vars, is_training=True)
            # Create another graph for testing that reuses the same weights
            logits_test = conv_net(_x, num_classes, dropout,
                                   reuse=True, is_training=False)

            # Define loss and optimizer (with train logits, for dropout to take effect)
            loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
                logits=logits_train, labels=_y))
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            grads = optimizer.compute_gradients(loss_op)

            # Only the first GPU computes accuracy
            if i == 0:
                # Evaluate model (with test logits, for dropout to be disabled)
                correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.argmax(_y, 1))
                accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

            reuse_vars = True
            tower_grads.append(grads)

    tower_grads = average_gradients(tower_grads)
    train_op = optimizer.apply_gradients(tower_grads)

    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()

    # Start Training
    with tf.Session() as sess:

        # Run the initializer
        sess.run(init)

        # Keep training until reaching the max number of steps
        for step in range(1, num_steps + 1):
            # Get a batch for each GPU
            batch_x, batch_y = mnist.train.next_batch(batch_size * num_gpus)
            # Run optimization op (backprop)
            ts = time.time()
            sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
            te = time.time() - ts
            if step % display_step == 0 or step == 1:
                # Calculate batch loss and accuracy
                loss, acc = sess.run([loss_op, accuracy],
                                     feed_dict={X: batch_x, Y: batch_y})
                print("Step " + str(step) + ": Minibatch Loss= " +
                      "{:.4f}".format(loss) + ", Training Accuracy= " +
                      "{:.3f}".format(acc) +
                      ", %i Examples/sec" % int(len(batch_x) / te))
        print("Optimization Finished!")

        # Calculate accuracy for MNIST test images
        print("Testing Accuracy:",
              np.mean([sess.run(accuracy,
                                feed_dict={X: mnist.test.images[i:i + batch_size],
                                           Y: mnist.test.labels[i:i + batch_size]})
                       for i in range(0, len(mnist.test.images), batch_size)]))
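The averaging step is the heart of this example: average_gradients stacks each shared variable's per-tower gradients along a new axis, takes the element-wise mean, and returns (mean_grad, var) pairs that apply_gradients can consume. Below is a small toy check of that behaviour (the gradient values are hypothetical, chosen only for illustration; it reuses the average_gradients function defined above and TF1 graph mode):

# Toy check of the gradient-averaging logic: two towers, one shared variable.
import tensorflow as tf

v = tf.Variable([0.0, 0.0], name='shared_var')
g0 = tf.constant([1.0, 2.0])   # gradient of v computed on tower/GPU 0
g1 = tf.constant([3.0, 6.0])   # gradient of v computed on tower/GPU 1
toy_tower_grads = [[(g0, v)], [(g1, v)]]

avg = average_gradients(toy_tower_grads)  # -> [(mean_grad, v)]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    mean_grad, var = avg[0]
    print(sess.run(mean_grad))  # expected: [2. 4.], the element-wise mean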
References:
[1] TensorFlow-Examples: https://github.com/aymericdamien/TensorFlow-Examples