tensorflow线性回归例子
来源:互联网 发布:百度地图lbs数据 编辑:程序博客网 时间:2024/06/02 04:59
loss = tf.reduce_mean(tf.square(y - y_))
optimizer = tf.train.GradientDescentOptimizer(0.5)
线性回归最常用的损失函数（cost function）就是 MSE 均方误差。
多变量线性回归例子
from __future__ import print_functionfrom __future__ import absolute_importimport osos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'import sysimport loggingimport numpy as npimport tensorflow as tfdef load_data(): datafile = 'data/ex1data2.txt' #Read into the data file cols = np.loadtxt(datafile,delimiter=',',usecols=(0,1,2),unpack=True) # [[x1 x1 x1 x1 x1], # [x2 x2 x2 x2 x2], # [y y y y y]] X = np.transpose(np.array(cols[:-1])) y = np.transpose(np.array(cols[-1:])) # X,Y: [[x1 x2], # [x1 x2], # [x1 x2]] stored_feature_means, stored_feature_stds = [], [] #寸均值和方差 Xnorm = X.copy() for icol in range(Xnorm.shape[1]): stored_feature_means.append(np.mean(Xnorm[:,icol])) stored_feature_stds.append(np.std(Xnorm[:,icol])) #Skip the first column #问题:为什么跳过第一行,又没加过1 #if not icol: continue #Faster to not recompute the mean and std again, just used stored values Xnorm[:,icol] = (Xnorm[:,icol] - stored_feature_means[-1])/stored_feature_stds[-1] return Xnorm, ytrain_X, train_y = load_data()# placeholderX = tf.placeholder("float", [None, 2])# modelb = tf.Variable(tf.zeros([1]))W = tf.Variable(tf.zeros([2, 1]))y = tf.matmul(X, W) + b# minimize mean squared error# 线性回归最常用的耗费函数就是MSE均方误差y_ = tf.placeholder("float", [None, 1])loss = tf.reduce_mean(tf.square(y - y_))optimizer = tf.train.GradientDescentOptimizer(0.5)train = optimizer.minimize(loss)# initialize variableinit = tf.global_variables_initializer()# 启动图 (graph)sess = tf.Session()sess.run(init)#也可以sess.run(tf.global_variables_initializer())sess.run(train, feed_dict={X: train_X, y_: train_y})print(sess.run(W), sess.run(b))
不用占位符的例子
import tensorflow as tf
import numpy as np
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# Build 100 phony 2-D sample points whose targets lie on a known plane:
#   y = 0.100 * x1 + 0.200 * x2 + 0.300
x_data = np.float32(np.random.rand(2, 100))
y_data = np.dot([0.100, 0.200], x_data) + 0.300
print(x_data.shape)
print(y_data.shape)

# Trainable linear model: y = W @ x_data + b (data is baked into the
# graph directly -- no placeholders needed).
b = tf.Variable(tf.zeros([1]))
W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
y = tf.matmul(W, x_data) + b

# Minimize the mean squared error with plain gradient descent (lr=0.5).
loss = tf.reduce_mean(tf.square(y - y_data))
train = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

# Launch the graph and initialize all variables in one shot.
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Fit the plane, reporting W and b every 20 steps; W should approach
# [0.100, 0.200] and b should approach 0.300.
for step in range(0, 201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(W), sess.run(b))
1 0
- tensorflow线性回归例子
- tensorflow 实现线性回归
- tensorflow线性回归
- Tensorflow实现线性回归
- TensorFlow之线性回归
- tensorflow之线性回归
- Tensorflow实现线性回归
- TensorFlow实现线性回归
- tensorflow线性回归测试
- TensorFlow之线性回归
- Tensorflow-线性回归
- Tensorflow-线性回归
- tensorflow 线性回归
- TensorFlow训练线性回归
- Tensorflow-note-线性回归
- tensorflow 一元线性回归
- 【TensorFlow】TensorFlow的线性回归
- tensorflow逻辑回归例子
- 包含多个段的程序
- 344. Reverse String
- 通过流实现文件读写的方法(中)
- c 语言中的宏定义
- android通过webservice连接SQL数据库(一)服务器端
- tensorflow线性回归例子
- 贪心算法基础之活动时间安排(二) 51nod 贪心教程
- 腾讯加固脱壳
- 46. Permutations
- Binder的使用
- Linux下抓包工具tcpdump的使用
- 快排算法C++实现
- 【linux】编写一个简单的shell
- Cocos2d-x lua学习笔记