TensorFlow LSTM RNN Example
Using an RNN to predict a waveform sequence: given a sin wave as input, the network learns to output the corresponding cos wave.
Import modules
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.contrib import rnn
Set parameters
batch_start = 0        # index for building the batch data
time_steps = 20        # number of time steps for backpropagation through time
batch_size = 50
training_steps = 200
input_size = 1         # sin data input size
output_size = 1        # cos data output size
cell_size = 10         # size of the RNN hidden state
learning_rate = 0.001  # learning rate
Generate data
def get_batch():
    global batch_start, time_steps
    # sx ==> (50 batch, 20 steps), the time-point data
    sx = np.arange(batch_start, batch_start + time_steps * batch_size) \
        .reshape((batch_size, time_steps)) / (10 * np.pi)
    seq = np.sin(sx)
    res = np.cos(sx)
    batch_start += time_steps
    # returned shape ==> (batch, step, input)
    return [seq[:, :, np.newaxis], res[:, :, np.newaxis], sx]
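As a quick sanity check (an illustrative snippet, not from the original post), the shapes returned by get_batch can be printed directly; note that the global batch_start advances on every call:

seq, res, sx = get_batch()
print(seq.shape)  # (50, 20, 1) -> (batch, step, input), the sin inputs
print(res.shape)  # (50, 20, 1) -> the cos targets
print(sx.shape)   # (50, 20)    -> raw time points, kept for plotting
batch_start = 0   # reset the index so training starts from the beginning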
Define the main structure of the LSTM RNN
class LSTMRNN(object):
    def __init__(self, n_steps, input_size, output_size, cell_size, batch_size):
        self.n_steps = n_steps
        self.input_size = input_size
        self.output_size = output_size
        self.cell_size = cell_size
        self.batch_size = batch_size
        with tf.name_scope('inputs'):
            self.xs = tf.placeholder(tf.float32, [None, n_steps, input_size], name='xs')
            self.ys = tf.placeholder(tf.float32, [None, n_steps, output_size], name='ys')
        with tf.variable_scope('in_hidden'):
            self.add_input_layer()
        with tf.variable_scope('LSTM_cell'):
            self.add_cell()
        with tf.variable_scope('out_hidden'):
            self.add_output_layer()
        with tf.name_scope('cost'):
            self.compute_cost()
        with tf.name_scope('train'):
            self.train_op = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
Add the input layer
    def add_input_layer(self):
        # (batch * n_steps, in_size)
        l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D')
        # Ws (in_size, cell_size)
        Ws_in = self._weight_variable([self.input_size, self.cell_size])
        # bs (cell_size, )
        bs_in = self._bias_variable([self.cell_size, ])
        # l_in_y = (batch * n_steps, cell_size)
        with tf.name_scope('Wx_plus_b'):
            l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in
        # reshape l_in_y ==> (batch, n_steps, cell_size)
        self.l_in_y = tf.reshape(l_in_y, [-1, self.n_steps, self.cell_size], name='2_3D')
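The 3-D to 2-D reshape here is just a trick for applying the same fully connected layer at every time step. A minimal numpy sketch of the equivalent shape gymnastics (illustrative values only, mirroring the parameters above):

import numpy as np

batch, steps, in_size, cell = 50, 20, 1, 10
x = np.random.randn(batch, steps, in_size)  # like self.xs: (batch, n_steps, input_size)
W = np.random.randn(in_size, cell)
b = np.zeros(cell)
x2d = x.reshape(-1, in_size)                # (batch * n_steps, input_size)
y2d = x2d.dot(W) + b                        # (batch * n_steps, cell_size)
y3d = y2d.reshape(batch, steps, cell)       # back to (batch, n_steps, cell_size)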
Add the RNN cell
    def add_cell(self):
        lstm_cell = rnn.BasicLSTMCell(self.cell_size)
        with tf.name_scope('initial_state'):
            self.cell_init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)
        self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(
            lstm_cell, self.l_in_y, initial_state=self.cell_init_state, time_major=False)
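Note that zero_state for a BasicLSTMCell returns an LSTMStateTuple (c, h), each of shape (batch_size, cell_size); it is this tuple that gets fed back in as the next initial state during training. A small way to inspect the shapes once the model below has been built (illustrative only):

print(model.cell_init_state)           # LSTMStateTuple of two (50, 10) tensors
print(model.cell_outputs.get_shape())  # (50, 20, 10) -> (batch, n_steps, cell_size)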
Add the output layer
    def add_output_layer(self):
        # shape ==> (batch * steps, cell_size)
        l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name='2_2D')
        Ws_out = self._weight_variable([self.cell_size, self.output_size])
        bs_out = self._bias_variable([self.output_size, ])
        # shape = (batch * steps, output_size)
        with tf.name_scope('Wx_plus_b'):
            self.pred = tf.matmul(l_out_x, Ws_out) + bs_out
Compute the loss and evaluate the model
    def compute_cost(self):
        losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [tf.reshape(self.pred, [-1], name='reshape_pred')],
            [tf.reshape(self.ys, [-1], name='reshape_target')],
            [tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)],
            average_across_timesteps=True,
            softmax_loss_function=self.ms_error,
            name='losses'
        )
        with tf.name_scope('average_cost'):
            self.cost = tf.div(
                tf.reduce_sum(losses, name='losses_sum'),
                self.batch_size, name='average_cost')
            tf.summary.scalar('cost', self.cost)

    def ms_error(self, labels, logits):
        return tf.square(tf.subtract(labels, logits))
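sequence_loss_by_example is normally paired with a cross-entropy loss; here ms_error is swapped in as softmax_loss_function, so with all weights set to 1 the whole expression collapses to an element-wise squared error summed over every point and averaged over the batch. A numpy sketch of what self.cost effectively computes (illustrative, assuming the shapes above):

pred = np.random.randn(batch_size * time_steps)  # flattened predictions
targ = np.random.randn(batch_size * time_steps)  # flattened targets
losses = np.square(targ - pred)                  # ms_error, element-wise
cost = losses.sum() / batch_size                 # summed, then averaged over the batch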
Define the weight variable
    def _weight_variable(self, shape, name='weights'):
        initializer = tf.random_normal_initializer(mean=0., stddev=1.)
        return tf.get_variable(shape=shape, initializer=initializer, name=name)
Define the bias variable
    def _bias_variable(self, shape, name='biases'):
        initializer = tf.constant_initializer(0.1)
        return tf.get_variable(name=name, shape=shape, initializer=initializer)
Train and test
if __name__ == '__main__':
    model = LSTMRNN(time_steps, input_size, output_size, cell_size, batch_size)
    with tf.Session() as sess:
        # tensorboard
        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter("logs", sess.graph)
        sess.run(tf.global_variables_initializer())
        plt.ion()
        plt.show()
        for i in range(training_steps):
            seq, res, xs = get_batch()  # fetch the batch data
            if i == 0:
                # create the initial state
                feed_dict = {
                    model.xs: seq,
                    model.ys: res,
                }
            else:
                # use the last state as the initial state for this run
                feed_dict = {
                    model.xs: seq,
                    model.ys: res,
                    model.cell_init_state: state,
                }
            _, cost, state, pred = sess.run(
                [model.train_op, model.cost, model.cell_final_state, model.pred],
                feed_dict=feed_dict)
            # plot the target and the prediction
            plt.plot(xs[0, :], res[0].flatten(), 'r',
                     xs[0, :], pred.flatten()[:time_steps], 'b--')
            plt.ylim((-1.2, 1.2))
            plt.draw()
            plt.pause(0.3)
            if i % 20 == 0:
                print('cost: ', round(cost, 4))
                result = sess.run(merged, feed_dict)
                writer.add_summary(result, i)
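The summaries written during training land in the logs directory; they can then be viewed by launching TensorBoard and opening the reported URL in a browser:

tensorboard --logdir logs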
Results:
Both figures above visualize the results. The waveform plot shows the LSTM RNN's predictions becoming more and more accurate over time, until they essentially coincide with the target cos curve; the computed cost also keeps dropping, ending below 0.1.
The network graph can be inspected in TensorBoard; its structure is as follows:
The left side of the graph is the overall structure of the LSTM RNN in this example: it consists of three layers, two hidden layers (input/output) plus one cell.
Expanding these three layers:
Each hidden layer contains weights, a bias, and the input data. In the input layer, the input is first reshaped to 2-D, passed through Wx_plus_b, and then reshaped back to 3-D; in the output layer, the cell's output is reshaped to 2-D and passed through Wx_plus_b to produce the final prediction.
The cell consists of an RNN cell plus an initial_state; on every batch iteration, initial_state is replaced by the final_state of the previous run.
This state handoff is how truncated backpropagation through time (BPTT) is realized in TensorFlow here: gradients only flow within each 20-step batch, while the hidden state carries context across batch boundaries.