tensorflow实现的一个三层神经网络
来源:互联网 发布:银联数据2017offer待遇 编辑:程序博客网 时间:2024/05/19 14:40
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 13 16:38:38 2016
cnn of myself,today!
@author: root
"""
import tensorflow as tf
import numpy as np
'''
inputs : input x set
input_feature_size : input feature size
output_feature_size : output feature size
activation_function : activation function
'''
def add_layer(inputs , input_feature_size , output_feature_size , activation_function = None):
    """Create one fully-connected layer and return its output tensor.

    Args:
        inputs: 2-D tensor of shape [batch, input_feature_size].
        input_feature_size: number of input features (columns of `inputs`).
        output_feature_size: number of units in this layer.
        activation_function: optional callable applied to the affine output;
            when None the raw affine transform is returned (linear layer).

    Returns:
        Tensor of shape [batch, output_feature_size].
    """
    # Weights drawn from a standard normal; bias initialized slightly
    # positive (0.1) so ReLU units start in their active region.
    Weights = tf.Variable(tf.random_normal([input_feature_size, output_feature_size]))
    bias = tf.Variable(tf.zeros([1, output_feature_size]) + 0.1)
    Wx_plus_bias = tf.matmul(inputs, Weights) + bias
    # Idiom fix: compare against None with `is not None`, not `!=`.
    if activation_function is not None:
        return activation_function(Wx_plus_bias)
    return Wx_plus_bias
# Training data: 300 points of y = x^2 - 0.5 plus Gaussian noise.
# x_data shape: (300, 1) -- column vector of inputs in [-1, 1].
x_data = np.linspace(-1, 1, 300, dtype=np.float32)[:, np.newaxis]
# noise has the same shape as x_data
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# Placeholders for inputs and targets; batch dimension left flexible.
x_holder = tf.placeholder(dtype=np.float32, shape=[None, 1])
y_holder = tf.placeholder(dtype=np.float32, shape=[None, 1])

# Three-layer fully-connected network (an MLP, despite the original
# comments calling it a "cnn"):
#   input layer : 1 feature
#   hidden layer: 1 -> 10 units, ReLU
#   output layer: 10 -> 1 unit, linear
l1 = add_layer(x_holder, 1, 10, activation_function=tf.nn.relu)
l2 = add_layer(l1, 10, 1, activation_function=None)

# Mean absolute error: summed over the feature axis, averaged over batch.
loss = tf.reduce_mean(tf.reduce_sum(tf.abs(y_holder - l2), 1))
# Plain gradient descent with learning rate 0.1.
train = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# Fix: tf.initialize_all_variables() is deprecated (removed after TF 1.x);
# tf.global_variables_initializer() is the supported replacement.
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # Same feed every step: full-batch training on the whole dataset.
    feed = {x_holder: x_data, y_holder: y_data}
    for i in range(2000):
        sess.run(train, feed_dict=feed)
        if i % 50 == 0:
            # Fix: use the print() function so the script also runs on
            # Python 3 (original used the Python 2 print statement).
            print(sess.run(loss, feed_dict=feed))
"""
Created on Sat Aug 13 16:38:38 2016
cnn of myself,today!
@author: root
"""
import tensorflow as tf
import numpy as np
'''
inputs : input x set
input_feature_size : input feature size
output_feature_size : output feature size
activation_function : activation function
'''
def add_layer(inputs , input_feature_size , output_feature_size , activation_function = None):
    """Build one dense layer: inputs @ W + b, optionally passed through
    the given activation function (linear when None)."""
    # Random-normal weight matrix and a small constant-positive bias row.
    Weights = tf.Variable(tf.random_normal([input_feature_size, output_feature_size]))
    bias = tf.Variable(tf.zeros([1, output_feature_size]) + 0.1)
    Wx_plus_bias = tf.matmul(inputs, Weights) + bias
    # Stay linear when no activation was supplied, otherwise apply it.
    outputs = Wx_plus_bias if activation_function is None else activation_function(Wx_plus_bias)
    return outputs
# Build the training set: 300 samples of x in [-1, 1], as a (300, 1) column.
x_data = np.linspace(-1, 1, 300, dtype=np.float32)[:, np.newaxis]
# Gaussian noise matching x_data's shape.
noise = np.random.normal(0, 0.05, x_data.shape)
# Targets follow y = x^2 - 0.5 with the noise added on top.
y_data = np.square(x_data) - 0.5 + noise

# Graph inputs: one feature in, one target out, any batch size.
x_holder = tf.placeholder(dtype=np.float32, shape=[None, 1])
y_holder = tf.placeholder(dtype=np.float32, shape=[None, 1])

# Network: 1 input -> 10 hidden ReLU units -> 1 linear output.
l1 = add_layer(x_holder, 1, 10, activation_function=tf.nn.relu)
l2 = add_layer(l1, 10, 1, activation_function=None)

# L1 loss: per-sample absolute error summed over features, batch-averaged.
loss = tf.reduce_mean(tf.reduce_sum(tf.abs(y_holder - l2), 1))
# One SGD step per run, learning rate 0.1.
train = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
init = tf.initialize_all_variables()

with tf.Session() as sess:
    sess.run(init)
    for i in range(2000):
        sess.run(train, feed_dict={x_holder: x_data, y_holder: y_data})
        # Report the current loss every 50 steps.
        if i % 50 == 0:
            print(sess.run(loss, feed_dict={x_holder: x_data, y_holder: y_data}))
0 0
- tensorflow实现的一个三层神经网络
- 用js实现一个三层的bp神经网络
- Python搭建tensorflow三层神经网络
- TensorFlow实现进阶的神经网络
- 最简单的三层神经网络Matlab实现
- 一个简单的matlab项目实现三层神经网络的简单应用
- Tensorflow构建一个简单的神经网络
- Tensorflow 构造一个简单的神经网络
- 利用tensorflow构造一个简单的神经网络
- tensorflow 经典卷积神经网络的实现
- tensorflow入门之实现单隐层的神经网络
- tensorflow实现简单的卷积神经网络
- 卷积神经网络LeNet5,基于TensorFlow的实现
- TensorFlow-6实现进阶的卷积神经网络
- 简单卷积神经网络的tensorflow实现
- Tensorflow实例:实现简单的卷积神经网络
- Tensorflow实例:实现进阶的卷积神经网络
- 使用TensorFlow实现一个文本分类的卷积神经网络Implementing a CNN for Text Classification in TensorFlow
- bootloader使用SLRAM设备向Kernel传递块数据
- URAL 2019 Pair: normal and paranormal 暴力?
- 算法竞赛入门经典第九章例题9-2 uva 437 巴比伦塔
- JSONObject.fromObject--JSON与对象的转换
- 用Python Scikit-learn 实现机器学习十大算法--朴素贝叶斯算法(文末有代码)
- tensorflow实现的一个三层神经网络
- Android官方文档之Location and Sensors APIs(中)
- 局部内部类访问的局部变量不必用final修饰,java8中
- 【图像识别】图像特征提取备忘
- 【bzoj1196】【HNOI2006】【公路修建问题】【并查集】
- iOS - @property 和 @synthesize 总结
- java编写求集合的全体子集
- 学习python的心得
- Codeforces Gym 101061B RGB plants