Implementing the Skip-Gram Model with TensorFlow


Understanding Word2Vec: The Skip-Gram Model

Word2Vec is a model that learns semantic knowledge from large amounts of text in an unsupervised way, and it is widely used in natural language processing (NLP). Word2Vec represents the semantic information of words as word vectors learned from text: it builds an embedding space in which semantically similar words lie close to each other. An embedding is a mapping that takes a word from its original space into a new multi-dimensional space; in other words, the space the words originally live in is embedded into a new space.
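To make the idea concrete, the minimal sketch below treats an embedding as a lookup table that maps each word id to a row of a dense matrix. The 3-word vocabulary and 4-dimensional vectors are made up purely for illustration; the model trained later in this post learns a (vocab_size, 200) matrix instead.

```python
import numpy as np

# Toy vocabulary and embedding matrix, invented for illustration only.
word_to_id = {'cat': 0, 'kitten': 1, 'iphone': 2}
toy_embedding = np.array([[0.9, 0.1, 0.4, 0.0],   # cat
                          [0.8, 0.2, 0.5, 0.1],   # kitten
                          [0.0, 0.9, 0.1, 0.8]])  # iphone

# An "embedding lookup" is nothing more than row indexing.
print(toy_embedding[word_to_id['kitten']])  # -> [0.8 0.2 0.5 0.1]
```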

To build some intuition: the word cat is semantically very close to kitten, dog is less close to kitten, and iphone is even further away from kitten in meaning. By learning this kind of numerical representation for the words in a vocabulary (that is, converting words into word vectors), we can run vector operations on those numbers and obtain some interesting results. For example, if we take the word vectors for kitten, cat, and dog and compute kitten - cat + dog, the resulting embedded vector will be very close to the vector for puppy.
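As a rough illustration of that analogy, the sketch below uses hand-made toy vectors (not real Word2Vec output) and ranks candidate words by cosine similarity to kitten - cat + dog.

```python
import numpy as np

# Toy, hand-made vectors purely for illustration -- real Word2Vec vectors
# are learned from a corpus and typically have 100-300 dimensions.
vectors = {
    'kitten': np.array([0.8, 0.9, 0.1]),
    'cat':    np.array([0.9, 0.8, 0.1]),
    'dog':    np.array([0.9, 0.1, 0.2]),
    'puppy':  np.array([0.8, 0.2, 0.2]),
    'iphone': np.array([0.1, 0.1, 0.9]),
}

def cosine(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

# kitten - cat + dog should land near puppy in the embedding space.
query = vectors['kitten'] - vectors['cat'] + vectors['dog']
ranked = sorted(vectors, key=lambda w: cosine(query, vectors[w]), reverse=True)
print(ranked)  # 'puppy' ranks at the top with these toy vectors
```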

For details, see these Zhihu columns:

https://zhuanlan.zhihu.com/p/27234078

and https://zhuanlan.zhihu.com/p/27296712

For the original English articles, see:

- Word2Vec Tutorial - The Skip-Gram Model

http://link.zhihu.com/?target=http%3A//mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/

- Word2Vec (Part 1): NLP With Deep Learning with Tensorflow (Skip-gram)

http://link.zhihu.com/?target=http%3A//www.thushv.com/natural_language_processing/word2vec-part-1-nlp-with-deep-learning-with-tensorflow-skip-gram/

Following the author's write-up, I copied his code and ran the program myself; I am recording it here on my blog so that I can study it in more depth.

```python
import os
import time
import random
from collections import Counter

import numpy as np
import tensorflow as tf

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

with open('C:/Users/long/source/datasets/tex.txt') as f:
    text = f.read()

# Define a function to preprocess the data
def preprocess(text, freq=5):
    '''
    Preprocess the text.

    Arguments
    ---
    text: raw text data
    freq: word-frequency threshold
    '''
    # Replace punctuation with special tokens
    text = text.lower()
    text = text.replace('.', ' <PERIOD> ')
    text = text.replace(',', ' <COMMA> ')
    text = text.replace('"', ' <QUOTATION_MARK> ')
    text = text.replace(';', ' <SEMICOLON> ')
    text = text.replace('!', ' <EXCLAMATION_MARK> ')
    text = text.replace('?', ' <QUESTION_MARK> ')
    text = text.replace('(', ' <LEFT_PAREN> ')
    text = text.replace(')', ' <RIGHT_PAREN> ')
    text = text.replace('--', ' <HYPHENS> ')
    # text = text.replace('\n', ' <NEW_LINE> ')
    text = text.replace(':', ' <COLON> ')
    words = text.split()

    # Drop low-frequency words to reduce noise
    word_counts = Counter(words)
    trimmed_words = [word for word in words if word_counts[word] > freq]

    return trimmed_words

# Clean the text and tokenize
words = preprocess(text)
print(words[:20])

# Build the word <-> integer lookup tables
vocab = set(words)
vocab_to_int = {w: c for c, w in enumerate(vocab)}
int_to_vocab = {c: w for c, w in enumerate(vocab)}

print("total words: {}".format(len(words)))
print("unique words: {}".format(len(set(words))))

# Convert the text from words to integer ids
int_words = [vocab_to_int[w] for w in words]

t = 1e-5         # subsampling t value
threshold = 0.8  # drop-probability threshold

# Count word occurrences
int_word_counts = Counter(int_words)
total_count = len(int_words)
# Compute word frequencies
word_freqs = {w: c / total_count for w, c in int_word_counts.items()}
# Compute the probability of each word being dropped
prob_drop = {w: 1 - np.sqrt(t / word_freqs[w]) for w in int_word_counts}
# Subsample the words
train_words = [w for w in int_words if prob_drop[w] < threshold]
print(len(train_words))

def get_targets(words, idx, window_size=5):
    '''
    Return the list of context words for an input word.

    Arguments
    ---
    words: list of words
    idx: index of the input word
    window_size: window size
    '''
    target_window = np.random.randint(1, window_size + 1)
    # Handle the case where there are not enough words before the input word
    start_point = idx - target_window if (idx - target_window) > 0 else 0
    end_point = idx + target_window
    # output words (i.e. the context words inside the window)
    targets = set(words[start_point: idx] + words[idx + 1: end_point + 1])
    return list(targets)

def get_batches(words, batch_size, window_size=5):
    '''
    Build a generator that yields batches.
    '''
    n_batches = len(words) // batch_size

    # Keep only full batches
    words = words[:n_batches * batch_size]

    for idx in range(0, len(words), batch_size):
        x, y = [], []
        batch = words[idx: idx + batch_size]
        for i in range(len(batch)):
            batch_x = batch[i]
            batch_y = get_targets(batch, i, window_size)
            # One input word maps to several output words, so repeat x to match lengths
            x.extend([batch_x] * len(batch_y))
            y.extend(batch_y)
        yield x, y

train_graph = tf.Graph()
with train_graph.as_default():
    inputs = tf.placeholder(tf.int32, shape=[None], name='inputs')
    labels = tf.placeholder(tf.int32, shape=[None, None], name='labels')

vocab_size = len(int_to_vocab)
embedding_size = 200  # embedding dimension

with train_graph.as_default():
    # Embedding weight matrix
    embedding = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1, 1))
    # Perform the lookup
    embed = tf.nn.embedding_lookup(embedding, inputs)

n_sampled = 100
with train_graph.as_default():
    softmax_w = tf.Variable(tf.truncated_normal([vocab_size, embedding_size], stddev=0.1))
    softmax_b = tf.Variable(tf.zeros(vocab_size))

    # Loss under negative sampling (sampled softmax)
    loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b, labels, embed, n_sampled, vocab_size)

    cost = tf.reduce_mean(loss)
    optimizer = tf.train.AdamOptimizer().minimize(cost)

with train_graph.as_default():
    # Randomly pick some words for validation
    valid_size = 16
    valid_window = 100
    # Pick valid_size // 2 words each from two different ranges
    valid_examples = np.array(random.sample(range(valid_window), valid_size // 2))
    valid_examples = np.append(valid_examples,
                               random.sample(range(1000, 1000 + valid_window), valid_size // 2))

    valid_size = len(valid_examples)
    # Validation word set
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # Compute the norm of each word vector and normalize
    norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
    normalized_embedding = embedding / norm
    # Look up the vectors of the validation words
    valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
    # Compute cosine similarity
    similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))

epochs = 10        # number of epochs
batch_size = 1000  # batch size
window_size = 10   # window size

with train_graph.as_default():
    saver = tf.train.Saver()  # checkpoint saver

with tf.Session(graph=train_graph) as sess:
    iteration = 1
    loss = 0
    sess.run(tf.global_variables_initializer())

    for e in range(1, epochs + 1):
        batches = get_batches(train_words, batch_size, window_size)
        start = time.time()
        for x, y in batches:

            feed = {inputs: x,
                    labels: np.array(y)[:, None]}
            train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)

            loss += train_loss

            if iteration % 100 == 0:
                end = time.time()
                print("Epoch {}/{}".format(e, epochs),
                      "Iteration: {}".format(iteration),
                      "Avg. Training loss: {:.4f}".format(loss / 100),
                      "{:.4f} sec/batch".format((end - start) / 100))
                loss = 0
                start = time.time()

            # Report similar words
            if iteration % 1000 == 0:
                # Evaluate the similarity matrix
                sim = similarity.eval()
                for i in range(valid_size):
                    valid_word = int_to_vocab[valid_examples[i]]
                    top_k = 8  # take the 8 most similar words
                    nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                    log = 'Nearest to [%s]:' % valid_word
                    for k in range(top_k):
                        close_word = int_to_vocab[nearest[k]]
                        log = '%s %s,' % (log, close_word)
                    print(log)

            iteration += 1

    save_path = saver.save(sess, "C:/Users/long/source/datasets/text8.ckpt")
    embed_mat = sess.run(normalized_embedding)
```
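Once training finishes, embed_mat holds the unit-normalized word vectors as a plain numpy array. The sketch below is not part of the original tutorial: it shows one way to query nearest neighbours from that matrix directly in numpy, assuming embed_mat, vocab_to_int, and int_to_vocab from the script above are still in scope.

```python
def nearest_words(word, embed_mat, vocab_to_int, int_to_vocab, top_k=8):
    # Rows of embed_mat are already unit vectors, so a dot product equals
    # the cosine similarity.
    vec = embed_mat[vocab_to_int[word]]
    sims = embed_mat @ vec
    nearest = (-sims).argsort()[1:top_k + 1]  # skip the query word itself
    return [int_to_vocab[i] for i in nearest]

# Hypothetical usage, with a word known to be in the vocabulary:
# print(nearest_words('one', embed_mat, vocab_to_int, int_to_vocab))
```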

