tensorflow几个函数讲解

来源:互联网 发布:网络电视能连跳舞毯吗 编辑:程序博客网 时间:2024/06/06 13:25

tensorflow几个函数讲解

博主原创
多看tensorflow API

1、tf.clip_by_value(a,1e-10,1.0)
将 a 中每个元素限制在 [1e-10, 1.0] 区间内:小于 1e-10 的取 1e-10,大于 1.0 的取 1.0

#coding:utf-8import tensorflow as tfa = tf.constant([[1.0,2.0]])b = tf.clip_by_value(a,1e-10,1.0)with tf.Session() as sess:    print sess.run(b.eval())

2、tf.truncated_normal()
生成截断正态分布

3、a = tf.equal(x,y)
对 x、y 中的元素逐个比较,返回与输入同形状的布尔张量(相等处为 True)

4、tf.constant()
创建常量

5、tf.placeholder()
创建占位符(placeholder),在运行会话时通过 feed_dict 喂入实际数据

x = tf.placeholder(tf.float32,shape=(1,2),name="input")例:import tensorflow as tfw1 = tf.Variable(tf.random_normal([2,3],stddev=1))w2 = tf.Variable(tf.random_normal([3,1],stddev=1))x = tf.placeholder(tf.float32,shape=(1,2),name="input")a = tf.matmul(x,w1)y = tf.matmul(a,w2)with tf.Session() as sess:    init_op = tf.global_variables_initializer()    sess.run(init_op)    feed_dict = {x:[[0.7,0.9]]}    print sess.run(y,feed_dict)

6、tf.random_normal_initializer()
生成正态分布

7、tf.Session()
启动会话

import tensorflow as tfw1 = tf.Variable(tf.random_normal([3,3],stddev=1,seed=1))w2 = tf.Variable(w1.initialized_value())#使用W1初始化w2x = tf.constant([[0.7,0.9,0.2]])a = tf.matmul(x,w1)y = tf.matmul(a,w2)with tf.Session() as sess:    #sess.run(w1.initializer)    #sess.run(w2.initializer)    init_op = tf.global_variables_initializer()    sess.run(init_op)#初始化所有的变量    print w1.eval()#得到张量的值    print sess.run(y)

8、tf.transpose()
调换维度

x = [[1, 2, 3],
     [4, 5, 6]]
tf.transpose(x, perm=[1, 0]) ==> [[1, 4],
                                  [2, 5],
                                  [3, 6]]

9、tf.cast()
转换数据类型

# tensor `a` is [1.8, 2.2], dtype=tf.float32
tf.cast(a, tf.int32) ==> [1, 2]  # dtype=tf.int32
# 将 a 转换为 tf.int32 类型(从上例可见,浮点转整型时小数部分被直接舍去)

10、tf.argmax()
返回某一个轴上最大值的索引

A = [[1, 2, 3, 4, 5]]
B = [[1, 2, 3], [4, 5, 6]]
with tf.Session() as sess:
    print(sess.run(tf.argmax(A, 1)))  # ==> [4]
    print(sess.run(tf.argmax(B, 1)))  # ==> [2 2]

11、tf.expand_dims()
在指定位置插入一个大小为 1 的新维度

# 't' is a tensor of shape [2]
shape(expand_dims(t, 0)) ==> [1, 2]
shape(expand_dims(t, 1)) ==> [2, 1]
shape(expand_dims(t, -1)) ==> [2, 1]

# 't2' is a tensor of shape [2, 3, 5]
shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]

12、tf.concat(concat_dim, values, name='concat')(注:TensorFlow 1.0 起参数顺序改为 tf.concat(values, axis, name='concat'))
连接张量

t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat(0, [t1, t2]) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat(1, [t1, t2]) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]

# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat(0, [t3, t4])) ==> [4, 3]
tf.shape(tf.concat(1, [t3, t4])) ==> [2, 6]

13、

vocab_processor=tensorflow.contrib.learn.preprocessing.VocabularyProcessor(max_document_length)x = np.array(list(vocab_processor.fit_transform(x_text)))

用于从语料中学习字典,并且返回索引,x就是索引,如下,两个9说明那里是同一个字
[[ 1 2 3 4 0 0 0]
[ 5 6 7 8 9 10 0]
[11 12 12 13 14 15 9]
[16 17 14 15 18 19 0]]

14、tf.nn.embedding_lookup(params,ids)
在params中查找与ids对应的表示

a = np.asarray([[0.1, 0.2, 0.3], [1.1, 1.2, 1.3], [2.1, 2.2, 2.3], [3.1, 3.2, 3.3], [4.1, 4.2, 4.3]])idx1 = tf.Variable([0, 2, 3, 1], tf.int32)idx2 = tf.Variable([[0, 2, 3, 1], [4, 0, 2, 2]], tf.int32)out1 = tf.nn.embedding_lookup(a, idx1)out2 = tf.nn.embedding_lookup(a, idx2)init = tf.global_variables_initializer()with tf.Session() as sess:    sess.run(init)    print "\n这是out1结果",sess.run(out1)    print out1    print "\n"    print "\n这是out2结果",sess.run(out2)    print out2

15、tf.shape( )
注意:print tf.shape(a) 打印的只是 shape 这个张量(op)本身而不是具体数值;要得到数值需要 sess.run(tf.shape(a)),或者用 a.get_shape() 查看静态形状。直接 print a 也会显示张量的 shape 和 dtype 信息

原创粉丝点击