Understanding the Keras Embedding Layer, with a Small Sentiment Analysis Example



1. For an explanation of word embeddings together with a code implementation, this Jianshu article is a good reference: http://www.jianshu.com/p/0124ac7d72b8

2. Keras provides an Embedding layer. The official documentation did not make its meaning clear to me; this article explains it much better: https://yq.aliyun.com/articles/221681
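Before the full example, a minimal sketch of my own (not from the linked article) may help: the Embedding layer is simply a trainable lookup table that maps each integer word index to a dense vector.

import numpy as np
from keras.models import Sequential
from keras.layers import Embedding

# a lookup table for a 10-word vocabulary: each index maps to a 4-dim vector
model = Sequential()
model.add(Embedding(input_dim=10, output_dim=4, input_length=3))
model.compile('rmsprop', 'mse')

# three word indices in -> three 4-dim vectors out: shape (1, 3, 4)
print(model.predict(np.array([[1, 5, 9]])).shape)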

Here is the code:

# -*- coding: utf-8 -*-
"""
Created on Tue Nov 21 22:26:20 2017
@author: www
"""
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.embeddings import Embedding

# define documents
docs = ['Well done!',
        'Good work',
        'Great effort',
        'nice work',
        'Excellent!',
        'Weak',
        'Poor effort!',
        'not good',
        'poor work',
        'Could have done better.']
# define class labels (as a NumPy array so Keras can consume them directly)
labels = array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])

# integer encode the documents
vocab_size = 50
encoded_docs = [one_hot(d, vocab_size) for d in docs]
print(encoded_docs)

# pad documents to a max length of 4 words
max_length = 4
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
print(padded_docs)

# define the model
model = Sequential()
model.add(Embedding(vocab_size, 8, input_length=max_length))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))

# compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])

# summarize the model
print(model.summary())

# fit the model
model.fit(padded_docs, labels, epochs=50, verbose=0)

# evaluate the model
loss, accuracy = model.evaluate(padded_docs, labels, verbose=0)
print('Accuracy: %f' % (accuracy * 100))
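After training, you can pull the learned embedding matrix out of the first layer to confirm its shape (a quick check I added; it is not part of the original code):

# one 8-dimensional vector for each of the 50 vocabulary slots: shape (50, 8)
embedding_weights = model.layers[0].get_weights()[0]
print(embedding_weights.shape)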

An example using pre-trained GloVe embeddings:

from numpy import array
from numpy import asarray
from numpy import zeros
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Embedding

# define documents
docs = ['Well done!',
        'Good work',
        'Great effort',
        'nice work',
        'Excellent!',
        'Weak',
        'Poor effort!',
        'not good',
        'poor work',
        'Could have done better.']
# define class labels
labels = array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])

# prepare the tokenizer
t = Tokenizer()
t.fit_on_texts(docs)
vocab_size = len(t.word_index) + 1

# integer encode the documents
encoded_docs = t.texts_to_sequences(docs)
print(encoded_docs)

# pad documents to a max length of 4 words
max_length = 4
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
print(padded_docs)

# load the whole GloVe embedding into memory
embeddings_index = dict()
f = open('../glove_data/glove.6B/glove.6B.100d.txt', encoding='utf-8')
for line in f:
    values = line.split()
    word = values[0]
    coefs = asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
print('Loaded %s word vectors.' % len(embeddings_index))

# create a weight matrix for the words in the training docs
embedding_matrix = zeros((vocab_size, 100))
for word, i in t.word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector

# define the model; the pretrained weights are frozen (trainable=False)
model = Sequential()
e = Embedding(vocab_size, 100, weights=[embedding_matrix], input_length=max_length, trainable=False)
model.add(e)
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))

# compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])

# summarize the model
print(model.summary())

# fit the model
model.fit(padded_docs, labels, epochs=50, verbose=0)

# evaluate the model
loss, accuracy = model.evaluate(padded_docs, labels, verbose=0)
print('Accuracy: %f' % (accuracy * 100))
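It is worth checking how many training words actually received a pretrained vector; any row of embedding_matrix left at zero simply stays a zero vector. A small sanity check of my own:

# count vocabulary rows with no pretrained vector
# (row 0 is reserved for padding and is always zero)
missing = sum(1 for row in embedding_matrix if not row.any())
print('%d of %d rows have no pretrained vector' % (missing, vocab_size))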


3. Sentiment analysis with Keras, using the built-in IMDB dataset.

Each word in the dataset has already been assigned an integer index, so every review arrives as a sequence of numbers rather than text.
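The mapping can be recovered with imdb.get_word_index(); with the default load_data() arguments, the indices stored in the data are shifted by 3, because 0, 1 and 2 are reserved for padding, the start marker and out-of-vocabulary words. A quick sketch to decode the first few words of a review:

from keras.datasets import imdb

(X_train, y_train), (X_test, y_test) = imdb.load_data()
word_index = imdb.get_word_index()                      # maps word -> index
index_word = {i + 3: w for w, i in word_index.items()}  # data indices are offset by 3
index_word.update({0: '<pad>', 1: '<start>', 2: '<oov>'})
print(' '.join(index_word.get(i, '?') for i in X_train[0][:20]))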

Load the data and take a look:

from keras.datasets import imdb
import numpy as np

(X_train, y_train), (X_test, y_test) = imdb.load_data()

# look at how many words each review contains
avg_len = list(map(len, X_train))
print(np.mean(avg_len))  # the average review length is about 238.714

# plot a histogram of review lengths
import matplotlib.pyplot as plt
plt.hist(avg_len, bins=range(min(avg_len), max(avg_len) + 50, 50))
plt.show()
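The histogram is useful, but a percentile gives a concrete number to sanity-check the padding length against (my own addition, not in the original):

# length that covers 95% of the training reviews; compare it with the
# maxlen=400 used below, which truncates the longest reviews
print(np.percentile(avg_len, 95))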

Now for the sentiment analysis itself:

from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
import numpy as np
from keras.datasets import imdb

(X_train, y_train), (X_test, y_test) = imdb.load_data()

# Keras' pad_sequences pads every review to the same length;
# maxlen caps the number of words per text, and longer reviews are
# truncated (from the front, by default)
max_word = 400
X_train = sequence.pad_sequences(X_train, maxlen=max_word)
X_test = sequence.pad_sequences(X_test, maxlen=max_word)
vocab_size = np.max([np.max(X_train[i]) for i in range(X_train.shape[0])]) + 1

# Keras provides the Embedding layer; its arguments are the vocabulary size,
# the embedding dimension, and the length of each input text
model = Sequential()
model.add(Embedding(vocab_size, 64, input_length=max_word))
model.add(Flatten())
model.add(Dense(2000, activation='relu'))
model.add(Dense(500, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())

model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20, batch_size=100, verbose=1)
score = model.evaluate(X_test, y_test)
# accuracy is roughly 85%

The convolutional network version of the model is built as follows:

from keras.layers import Conv1D
from keras.layers import MaxPool1D
from keras.layers import Dropout
from keras.layers import Flatten

model = Sequential()
model.add(Embedding(vocab_size, 64, input_length=max_word))
model.add(Conv1D(filters=64, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPool1D(pool_size=2))
model.add(Dropout(0.25))
model.add(Conv1D(filters=128, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPool1D(pool_size=2))
model.add(Dropout(0.25))
# Flatten before the dense layers so the model outputs a single
# prediction per review rather than one per timestep
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
print(model.summary())
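As a variation (my own, not from the original), GlobalMaxPooling1D can stand in for Flatten: it takes the maximum over the time axis, so the dense layers' parameter count no longer depends on the sequence length.

from keras.layers import GlobalMaxPooling1D

model = Sequential()
model.add(Embedding(vocab_size, 64, input_length=max_word))
model.add(Conv1D(filters=64, kernel_size=3, padding='same', activation='relu'))
model.add(GlobalMaxPooling1D())  # max over the time axis -> one 64-dim vector per review
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])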

The LSTM network is built as follows:

from keras.layers import LSTM

model = Sequential()
model.add(Embedding(vocab_size, 64, input_length=max_word))
model.add(LSTM(128, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(64, return_sequences=True))
model.add(Dropout(0.2))
# the last LSTM must return only its final state (return_sequences=False,
# the default) so that Dense(1) produces a single prediction per review
model.add(LSTM(32))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
print(model.summary())
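Both the CNN and LSTM variants are then trained and evaluated exactly like the dense model above, e.g.:

model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20, batch_size=100, verbose=1)
loss, accuracy = model.evaluate(X_test, y_test)
print('Test accuracy: %f' % accuracy)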

These are all simple network structures; Keras' great advantage is its ease of use, which lets you focus your attention on model design and optimization.

Of the models above, the LSTM achieves slightly higher accuracy.