tensorflow37 《TensorFlow实战》 Notes - 07-01 Implementing Word2Vec in TensorFlow (code)
# 《TensorFlow实战》 Chapter 7: Implementing RNNs and Word2Vec in TensorFlow
# win10, TensorFlow 1.0.1, python 3.5.3
# CUDA v8.0, cudnn-8.0-windows10-x64-v5.1
# filename: sz07.01.py  # Word2Vec in TensorFlow
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
# tensorflow\tensorflow\examples\tutorials\word2vec\word2vec_basic.py

import collections
import math
import os
import random
import zipfile

import numpy as np
from six.moves import urllib
import tensorflow as tf

# If the download fails, manually download http://mattmahoney.net/dc/text8.zip
# into the same directory as sz07.01.py
url = 'http://mattmahoney.net/dc/'

def maybe_download(filename, expected_bytes):
    """Download the corpus if it is not present, then verify its size."""
    if not os.path.exists(filename):
        filename, _ = urllib.request.urlretrieve(url + filename, filename)
    statinfo = os.stat(filename)
    if statinfo.st_size == expected_bytes:
        print('Found and verified', filename)
    else:
        print(statinfo.st_size)
        raise Exception('Failed to verify ' + filename +
                        '. Can you get to it with a browser?')
    return filename

filename = maybe_download('text8.zip', 31344016)

def read_data(filename):
    """Read the zipped corpus into a list of whitespace-separated tokens."""
    with zipfile.ZipFile(filename) as f:
        data = tf.compat.as_str(f.read(f.namelist()[0])).split()
    return data

words = read_data(filename)
print('Data size', len(words))

vocabulary_size = 50000

def build_dataset(words):
    # Keep the 49,999 most frequent words; everything else maps to UNK (id 0).
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    data = list()
    unk_count = 0
    for word in words:
        if word in dictionary:
            index = dictionary[word]
        else:
            index = 0  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reverse_dictionary

data, count, dictionary, reverse_dictionary = build_dataset(words)
del words  # free memory; only the id sequence `data` is needed from here on
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])

data_index = 0

def generate_batch(batch_size, num_skips, skip_window):
    """Generate a skip-gram batch: each center word is paired with
    num_skips context words sampled within skip_window on either side."""
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window, center, skip_window ]
    buffer = collections.deque(maxlen=span)
    for _ in range(span):
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    for i in range(batch_size // num_skips):
        target = skip_window  # the center word sits in the middle of the span
        targets_to_avoid = [skip_window]
        for j in range(num_skips):
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[target]
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    return batch, labels

batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
    print(batch[i], reverse_dictionary[batch[i]],
          '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
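# --- Added sketch (not in the book): what generate_batch produces ------------
# Skip-gram training data is just (center, context) pairs. The toy helper
# below is a hypothetical reimplementation (assuming num_skips exhausts the
# whole window) that makes the pairing explicit on the sample sentence
# printed above, without touching the global data_index.
def toy_skipgram_pairs(tokens, skip_window=1):
    pairs = []
    for i in range(skip_window, len(tokens) - skip_window):
        for offset in range(-skip_window, skip_window + 1):
            if offset != 0:
                pairs.append((tokens[i], tokens[i + offset]))
    return pairs

# [('originated', 'anarchism'), ('originated', 'as'), ('as', 'originated'), ...]
# the same pairs the demo loop above prints, just in a deterministic order.
print(toy_skipgram_pairs(['anarchism', 'originated', 'as', 'a', 'term']))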
batch_size = 128
embedding_size = 128  # dimensionality of the word vectors
skip_window = 1       # context words considered on each side
num_skips = 2         # (center, context) pairs generated per center word

# A fixed validation set of ids drawn from the 100 most frequent words,
# used only to print nearest neighbors during training.
valid_size = 16
valid_window = 100
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64      # number of negative samples for NCE

graph = tf.Graph()
with graph.as_default():
    train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    with tf.device('/cpu:0'):
        embeddings = tf.Variable(
            tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
        embed = tf.nn.embedding_lookup(embeddings, train_inputs)
        nce_weights = tf.Variable(
            tf.truncated_normal([vocabulary_size, embedding_size],
                                stddev=1.0 / math.sqrt(embedding_size)))
        nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

    # NCE loss: learn to tell the true context word apart from num_sampled
    # noise words, avoiding a full 50,000-way softmax at every step.
    loss = tf.reduce_mean(
        tf.nn.nce_loss(weights=nce_weights,
                       biases=nce_biases,
                       labels=train_labels,
                       inputs=embed,
                       num_sampled=num_sampled,
                       num_classes=vocabulary_size))

    optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

    # L2-normalize the embeddings so that a dot product is cosine similarity.
    # (keep_dims is the TF 1.0.x argument name, renamed keepdims later.)
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
    normalized_embeddings = embeddings / norm
    valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings,
                                              valid_dataset)
    similarity = tf.matmul(valid_embeddings, normalized_embeddings,
                           transpose_b=True)

    init = tf.global_variables_initializer()

num_steps = 100001

with tf.Session(graph=graph) as session:
    init.run()
    print('Initialized')

    average_loss = 0
    for step in range(num_steps):
        batch_inputs, batch_labels = generate_batch(batch_size, num_skips,
                                                    skip_window)
        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
        _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
        average_loss += loss_val

        if step % 2000 == 0:
            if step > 0:
                average_loss /= 2000
            print("Average loss at step ", step, ": ", average_loss)
            average_loss = 0

        # Every 10,000 steps, print the nearest neighbors of the validation words.
        if step % 10000 == 0:
            sim = similarity.eval()
            for i in range(valid_size):
                valid_word = reverse_dictionary[valid_examples[i]]
                top_k = 8
                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                log_str = "Nearest to %s:" % valid_word
                for k in range(top_k):
                    close_word = reverse_dictionary[nearest[k]]
                    log_str = "%s %s," % (log_str, close_word)
                print(log_str)
    final_embeddings = normalized_embeddings.eval()

# Visualize the first 100 word vectors with t-SNE.
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt

def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
    assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
    plt.figure(figsize=(18, 18))
    for i, label in enumerate(labels):
        x, y = low_dim_embs[i, :]
        plt.scatter(x, y)
        plt.annotate(label,
                     xy=(x, y),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    plt.savefig(filename)

tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 100
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in range(plot_only)]
plot_with_labels(low_dim_embs, labels)
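# --- Added sketch (not in the book): querying neighbors for any word ---------
# Because the rows of final_embeddings are L2-normalized, a single dot product
# against the whole matrix yields cosine similarities. `nearest_words` is a
# hypothetical helper mirroring the validation printout in the training loop.
def nearest_words(word, top_k=8):
    vec = final_embeddings[dictionary[word]]   # (embedding_size,)
    sims = np.dot(final_embeddings, vec)       # cosine similarity to every word
    nearest = (-sims).argsort()[1:top_k + 1]   # index 0 is the word itself
    return [reverse_dictionary[i] for i in nearest]

print('Nearest to three:', nearest_words('three'))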
'''
Found and verified text8.zip
Data size 17005207
Most common words (+UNK) [['UNK', 418391], ('the', 1061396), ('of', 593677), ('and', 416629), ('one', 411764)]
Sample data [5242, 3084, 12, 6, 195, 2, 3137, 46, 59, 156] ['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse', 'first', 'used', 'against']
3084 originated -> 12 as
3084 originated -> 5242 anarchism
12 as -> 3084 originated
12 as -> 6 a
6 a -> 195 term
6 a -> 12 as
195 term -> 2 of
195 term -> 6 a
Initialized
Average loss at step 0 : 242.824676514
Nearest to this: beatrix, territorial, odo, eileen, bicameral, animists, baptize, malm,
...
Nearest to world: crb, alexander, diphthongs, adventist, persons, alabama, leftist, idol,
Nearest to would: will, may, can, could, might, must, should, to,
Average loss at step 92000 : 4.71387670171
Average loss at step 94000 : 4.62264616895
Average loss at step 96000 : 4.71346354306
Average loss at step 98000 : 4.62557934207
Average loss at step 100000 : 4.66860903692
Nearest to this: which, it, the, that, mico, spokesperson, one, dwell,
Nearest to however: but, although, that, dasyprocta, ssbn, and, opencyc, microsite,
Nearest to was: is, had, has, were, became, been, when, by,
Nearest to d: b, lomond, raions, gh, t, deemed, aon, microsite,
Nearest to use: thaler, histone, agouti, crb, primigenius, potomac, callithrix, upanija,
Nearest to other: many, agouti, including, these, some, bends, callithrix, clodius,
Nearest to over: about, lw, off, coolidge, absent, six, mileva, four,
Nearest to after: before, when, during, in, persisted, following, was, gave,
Nearest to called: UNK, used, subatomic, clo, aorta, and, emblems, implied,
Nearest to often: sometimes, usually, generally, still, commonly, now, frequently, also,
Nearest to of: nine, in, dasyprocta, and, including, callithrix, amo, thaler,
Nearest to have: had, has, are, were, be, include, aral, elevate,
Nearest to can: may, would, will, could, must, should, might, to,
Nearest to three: four, five, two, six, seven, eight, one, nine,
Nearest to world: alexander, diphthongs, crb, leftist, adventist, guerrilla, vortigern, appointment,
Nearest to would: will, may, can, could, might, must, should, to,
'''
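# --- Added sketch (not in the book): word analogies ---------------------------
# The best-known Word2Vec property is additive analogy: the vector nearest to
# v(king) - v(man) + v(woman) tends to be v(queen). `analogy` is a hypothetical
# helper; after only 100k steps on text8 the results are rough, so treat this
# purely as a demonstration of the vector arithmetic.
def analogy(a, b, c, top_k=4):
    vec = (final_embeddings[dictionary[b]]
           - final_embeddings[dictionary[a]]
           + final_embeddings[dictionary[c]])
    vec /= np.linalg.norm(vec)  # renormalize the query vector
    sims = np.dot(final_embeddings, vec)
    candidates = (-sims).argsort()[:top_k + 3]
    exclude = {dictionary[a], dictionary[b], dictionary[c]}  # drop the inputs
    return [reverse_dictionary[i] for i in candidates if i not in exclude][:top_k]

print('man : king :: woman : ?', analogy('man', 'king', 'woman'))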