Python Data Analysis Study Notes (9)
Chapter 9: Analyzing Textual Data and Social Media
1 Installing NLTK (omitted)
2 Filtering Out Stopwords, Names, and Numbers
Sample code:
import nltk

# Load the English stopword corpus
sw = set(nltk.corpus.stopwords.words('english'))
print('Stop words', list(sw)[:7])

# Get some of the files in the Gutenberg corpus
gb = nltk.corpus.gutenberg
print('Gutenberg files', gb.fileids()[-5:])

# Take the first two sentences of milton-paradise.txt as the text to filter below
text_sent = gb.sents("milton-paradise.txt")[:2]
print('Unfiltered', text_sent)

# Filter out stopwords
for sent in text_sent:
    filtered = [w for w in sent if w.lower() not in sw]
    print('Filtered', filtered)
    # Tag the remaining words with their parts of speech
    tagged = nltk.pos_tag(filtered)
    print("Tagged", tagged)
    # Drop proper nouns (NNP) and numbers (CD)
    words = []
    for word in tagged:
        if word[1] != 'NNP' and word[1] != 'CD':
            words.append(word[0])
    print(words)

# POS tagset mapping
# print(nltk.tag.tagset_mapping('ru-rnc', 'universal'))
Output:
Stop words ['his', 'only', 'because', 'with', 'each', 'myself', 'both']
Gutenberg files ['milton-paradise.txt', 'shakespeare-caesar.txt', 'shakespeare-hamlet.txt', 'shakespeare-macbeth.txt', 'whitman-leaves.txt']
Unfiltered [['[', 'Paradise', 'Lost', 'by', 'John', 'Milton', '1667', ']'], ['Book', 'I']]
Filtered ['[', 'Paradise', 'Lost', 'John', 'Milton', '1667', ']']
Tagged [('[', 'JJ'), ('Paradise', 'NNP'), ('Lost', 'NNP'), ('John', 'NNP'), ('Milton', 'NNP'), ('1667', 'CD'), (']', 'NN')]
['[', ']']
Filtered ['Book']
Tagged [('Book', 'NN')]
['Book']
The tag set used in this example:
{'PRP$',
'PDT',
'CD',
'EX',
'.',
'NNS',
'MD',
'PRP',
'RP',
'(',
'VBD',
'``',
"''",
'NN',    # noun
'LS',
'VBN',
'WRB',
'IN',    # preposition
'FW',
'POS',
'CC',    # coordinating conjunction
':',
'DT',
'VBZ',
'RBS',
'RBR',
'WP$',
'RB',
'SYM',
'JJS',
'JJR',
'UH',
'WDT',
'#',
',',
')',
'VB',
'NNPS',
'VBP',   # verb
'NNP',
'JJ',    # adjective
'WP',
'VBG',
'$',
'TO'}    # the word "to"
These tags can be coarsely mapped onto the following 12 universal types (a small mapping sketch follows the list):
'VERB',
'NOUN',
'PRON',
'ADJ',
'ADV',
'ADP',
'CONJ',
'DET',
'NUM',
'PRT',
'X',
'.'
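The commented-out tagset_mapping call in the code above hints at how this mapping works. As a minimal sketch (assuming the 'en-ptb' Penn Treebank mapping tables that ship with NLTK's universal_tagset data package, which may need nltk.download('universal_tagset') first):

import nltk

# Map the Penn Treebank tagset that pos_tag uses onto the 12 universal types;
# 'en-ptb' names the Penn Treebank mapping bundled with NLTK
mapping = nltk.tag.tagset_mapping('en-ptb', 'universal')
print(mapping['NN'], mapping['IN'], mapping['CC'])  # NOUN ADP CONJ

# pos_tag can also emit universal tags directly
print(nltk.pos_tag(['Paradise', 'Lost'], tagset='universal'))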
3 The Bag-of-Words Model
Installing scikit-learn (omitted)
Sample code:
import nltk
from sklearn.feature_extraction.text import CountVectorizer

# Load the following two files from the Gutenberg corpus
gb = nltk.corpus.gutenberg
hamlet = gb.raw('shakespeare-hamlet.txt')
macbeth = gb.raw("shakespeare-macbeth.txt")

# Drop English stopwords
cv = CountVectorizer(stop_words='english')

# Print part of the feature vector
print("Feature vector", cv.fit_transform([hamlet, macbeth]).toarray())

# Features are sorted alphabetically
print('Features', cv.get_feature_names()[:5])
Output:
Feature vector [[ 1 0 1 ..., 14 0 1]
 [ 0 1 0 ..., 1 1 0]]
Features ['1599', '1603', 'abhominably', 'abhorred', 'abide']
4 Word Frequency Analysis
Sample code:
import nltk
import string

def printLine(values, num, keyOrValue, tag):
    """
    Print the keys or values of the top-num elements of the given mapping, labeled with tag.
    :param values: the mapping
    :param num: how many elements to print
    :param keyOrValue: print keys or values; 0 means keys, 1 means values
    :param tag: the output label
    :return:
    """
    tmpValue = []
    for key in sorted(values.items(), key=lambda d: d[1], reverse=True)[:num]:
        tmpValue.append(key[keyOrValue])
    print(tag, ":", tmpValue)

# Load the document
gb = nltk.corpus.gutenberg
words = gb.words("shakespeare-caesar.txt")

# Remove stopwords and punctuation
sw = set(nltk.corpus.stopwords.words('english'))
punctuation = set(string.punctuation)
filtered = [w.lower() for w in words if w.lower() not in sw and w.lower() not in punctuation]

# Create a FreqDist object and print the most frequent keys and values
fd = nltk.FreqDist(filtered)
printLine(fd, 5, 0, "Words")
printLine(fd, 5, 1, "Counts")

# The most frequent word and its count
print('Max', fd.max())
print('Count', fd['caesar'])

# The most frequent bigrams and their counts
fd = nltk.FreqDist(nltk.bigrams(filtered))
printLine(fd, 5, 0, "Bigrams")
printLine(fd, 5, 1, "Counts")
print('Bigram Max', fd.max())
print('Bigram count', fd[('let', 'vs')])

# The most frequent trigrams and their counts
fd = nltk.FreqDist(nltk.trigrams(filtered))
printLine(fd, 5, 0, "Trigrams")
printLine(fd, 5, 1, "Counts")
print('Trigram Max', fd.max())
print('Trigram count', fd[('enter', 'lucius', 'luc')])
Output:
Words : ['caesar', 'brutus', 'bru', 'haue', 'shall']
Counts : [190, 161, 153, 148, 125]
Max caesar
Count 190
Bigrams : [('let', 'vs'), ('wee', 'l'), ('mark', 'antony'), ('marke', 'antony'), ('st', 'thou')]
Counts : [16, 15, 13, 12, 12]
Bigram Max ('let', 'vs')
Bigram count 16
Trigrams : [('enter', 'lucius', 'luc'), ('wee', 'l', 'heare'), ('thee', 'thou', 'st'), ('beware', 'ides', 'march'), ('let', 'vs', 'heare')]
Counts : [4, 4, 3, 3, 3]
Trigram Max ('enter', 'lucius', 'luc')
Trigram count 4
5 Naive Bayes Classification
Naive Bayes is a probabilistic algorithm, based on Bayes' theorem from probability theory and mathematical statistics.
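As a minimal worked sketch of the theorem itself (every probability below is invented purely for illustration), the posterior P(class | feature) is the likelihood times the prior, divided by the total probability of the feature:

# Hypothetical numbers: classifying a word as stopword / not-stopword
# from a single feature, its length. All probabilities are invented.
p_stop = 0.4                 # P(stopword): the prior
p_len2_given_stop = 0.3      # P(len=2 | stopword): the likelihood
p_len2_given_not = 0.05      # P(len=2 | not stopword)

# Total probability of seeing a 2-letter word
p_len2 = p_len2_given_stop * p_stop + p_len2_given_not * (1 - p_stop)

# Bayes' theorem: posterior P(stopword | len=2)
posterior = p_len2_given_stop * p_stop / p_len2
print(posterior)  # 0.8, so a 2-letter word is most likely a stopword

The classifier in the code below learns likelihoods like these from the training set instead of hard-coding them.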
Sample code:
import nltk
import string
import random

# Sets of stopwords and punctuation
sw = set(nltk.corpus.stopwords.words('english'))
punctuation = set(string.punctuation)

# Use word length as the single feature
def word_features(word):
    return {'len': len(word)}

# Is the word a stopword or punctuation?
def isStopword(word):
    return word in sw or word in punctuation

# Load the file
gb = nltk.corpus.gutenberg
words = gb.words("shakespeare-caesar.txt")

# Label each word according to whether it is a stopword
labeled_words = [(word.lower(), isStopword(word.lower())) for word in words]
random.seed(42)
random.shuffle(labeled_words)
print(labeled_words[:5])

# Use the length of each word as its feature value
featuresets = [(word_features(n), word) for (n, word) in labeled_words]

# Train a naive Bayes classifier on 90% of the data
cutoff = int(.9 * len(featuresets))

# Create the training and test sets
train_set, test_set = featuresets[:cutoff], featuresets[cutoff:]

# Check how the classifier behaves
classifier = nltk.NaiveBayesClassifier.train(train_set)
print("'behold' class", classifier.classify(word_features('behold')))
print("'the' class", classifier.classify(word_features('the')))

# Compute the classifier's accuracy on the test set
print("Accuracy", nltk.classify.accuracy(classifier, test_set))

# The features that contribute the most
print(classifier.show_most_informative_features(5))
Output:
[('i', True), ('is', True), ('in', True), ('he', True), ('ambitious', False)]
'behold' class False
'the' class True
Accuracy 0.8521671826625387
Most Informative Features
len = 7 False : True = 77.8 : 1.0
len = 6 False : True = 52.2 : 1.0
len = 1 True : False = 51.8 : 1.0
len = 2 True : False = 10.9 : 1.0
len = 5 False : True = 10.9 : 1.0
None
6 Sentiment Analysis
Sample code:
import random
from nltk.corpus import movie_reviews
from nltk.corpus import stopwords
from nltk import FreqDist
from nltk import NaiveBayesClassifier
from nltk.classify import accuracy
import string

def getElementsByNum(values, num, keyOrValue):
    """
    Return the keys or values of the top-num elements of the given mapping.
    :param values: the mapping
    :param num: how many elements to return
    :param keyOrValue: return keys or values; 0 means keys, 1 means values
    :return:
    """
    tmpValue = []
    for key in sorted(values.items(), key=lambda d: d[1], reverse=True)[:num]:
        tmpValue.append(key[keyOrValue])
    return tmpValue

# Load the data
labeled_docs = [(list(movie_reviews.words(fid)), cat)
                for cat in movie_reviews.categories()
                for fid in movie_reviews.fileids(cat)]
random.seed(42)
random.shuffle(labeled_docs)
review_words = movie_reviews.words()
print("#Review Words", len(review_words))

# Set up stopwords and punctuation
sw = set(stopwords.words('english'))
punctuation = set(string.punctuation)

# Is the word a stopword or punctuation?
def isStopWord(word):
    return word in sw or word in punctuation

# Filter out stopwords and punctuation
filtered = [w.lower() for w in review_words if not isStopWord(w.lower())]
print("# After filter", len(filtered))

# Use the top 5% most frequent words as features
words = FreqDist(filtered)
N = int(.05 * len(words.keys()))
word_features = getElementsByNum(words, N, 0)
print('word_features', word_features)

# Use raw word counts as the metric
def doc_features(doc):
    doc_words = FreqDist(w for w in doc if not isStopWord(w))
    features = {}
    for word in word_features:
        features['count (%s)' % word] = doc_words.get(word, 0)
    return features

# Build the feature sets from the raw word counts
featuresets = [(doc_features(d), c) for (d, c) in labeled_docs]

# Create the training and test sets
train_set, test_set = featuresets[200:], featuresets[:200]

# Train the classifier and compute its accuracy on the test set
classifier = NaiveBayesClassifier.train(train_set)
print("Accuracy", accuracy(classifier, test_set))

# The features that contribute the most
print(classifier.show_most_informative_features())
Output:
#Review Words 1583820
# After filter 710579
Accuracy 0.765
Most Informative Features
count (wonderful) = 2 pos : neg = 14.8 : 1.0
count (outstanding) = 1 pos : neg = 12.0 : 1.0
count (apparently) = 2 neg : pos = 12.0 : 1.0
count (stupid) = 2 neg : pos = 11.1 : 1.0
count (boring) = 2 neg : pos = 10.7 : 1.0
count (bad) = 5 neg : pos = 10.0 : 1.0
count (best) = 4 pos : neg = 9.9 : 1.0
count (anyway) = 2 neg : pos = 8.1 : 1.0
count (minute) = 2 neg : pos = 8.1 : 1.0
count (matt) = 2 pos : neg = 7.9 : 1.0
None
7 Creating a Word Cloud
Sample code:
import string
from nltk.corpus import movie_reviews
from nltk.corpus import stopwords
from nltk import FreqDist

def getElementsByNum(values, num, keyOrValue):
    """
    Return the keys or values of the top-num elements of the given mapping.
    :param values: the mapping
    :param num: how many elements to return
    :param keyOrValue: return keys or values; 0 means keys, 1 means values
    :return:
    """
    tmpValue = []
    for key in sorted(values.items(), key=lambda d: d[1], reverse=True)[:num]:
        tmpValue.append(key[keyOrValue])
    return tmpValue

# Sets of stopwords and punctuation
sw = set(stopwords.words('english'))
punctuation = set(string.punctuation)

# Is the word a stopword or punctuation?
# (must be 'or', not 'and': the two sets are disjoint, so 'and' would filter nothing)
def isStopWord(word):
    return word in sw or word in punctuation

# Get the raw documents
review_words = movie_reviews.words()

# Filter out stopwords and punctuation
filtered = [w.lower() for w in review_words if not isStopWord(w.lower())]

# Use the top 1% most frequent words as tags
words = FreqDist(filtered)
N = int(.01 * len(words.keys()))
tags = getElementsByNum(words, N, 0)
for tag in tags:
    print(tag, ":", words[tag])

# Paste the output into the Wordle page to get a word cloud
Output: omitted
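As an alternative to pasting into Wordle, the cloud can also be rendered locally. This is a sketch assuming the third-party wordcloud package (not used in the book; install it separately, e.g. with pip install wordcloud), reusing the FreqDist `words` and tag list `tags` from the listing above:

import matplotlib.pyplot as plt
from wordcloud import WordCloud

# generate_from_frequencies expects a {word: count} mapping,
# so build one from the top-1% tags computed above
freqs = {tag: words[tag] for tag in tags}
wc = WordCloud(width=800, height=400, background_color='white')
wc.generate_from_frequencies(freqs)

plt.imshow(wc, interpolation='bilinear')
plt.axis('off')
plt.show()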
Further filtering
Term frequency and inverse document frequency (Term Frequency-Inverse Document Frequency, TF-IDF): a word is weighted higher the more often it occurs in a document and lower the more documents it occurs in, so words that are common everywhere rank low. A small worked sketch follows.
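A minimal sketch of the computation (the toy documents are invented for illustration; this uses the smoothed idf that scikit-learn's TfidfVectorizer applies by default, and ignores the L2 normalization it also performs):

import math

# Toy corpus, invented for illustration
docs = [["good", "movie"], ["bad", "movie"], ["good", "good", "plot"]]
n = len(docs)

def tfidf(word, doc):
    tf = doc.count(word)                    # raw term frequency in this document
    df = sum(word in d for d in docs)       # number of documents containing the word
    idf = math.log((1 + n) / (1 + df)) + 1  # smoothed idf, as in scikit-learn's default
    return tf * idf

print(tfidf("movie", docs[0]))  # common word -> lower weight (about 1.29)
print(tfidf("plot", docs[2]))   # rare word   -> higher weight (about 1.69)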
Sample code:
from nltk.corpus import movie_reviews
from nltk.corpus import stopwords
from nltk.corpus import names
from nltk import FreqDist
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import numpy as np
import string

# Set up stopwords, punctuation, and names
sw = set(stopwords.words('english'))
punctuation = set(string.punctuation)
all_names = set([name.lower() for name in names.words()])

# Filter out words (stopwords, punctuation, names, numbers)
def isStopWord(word):
    return (word in sw or word in punctuation) or not word.isalpha() or word in all_names

# Get the movie review documents
review_words = movie_reviews.words()

# Filter out stopwords
filtered = [w.lower() for w in review_words if not isStopWord(w.lower())]
words = FreqDist(filtered)

# Build the list of strings that TfidfVectorizer needs
# (dropping stopwords and words that occur only once)
texts = []
for fid in movie_reviews.fileids():
    texts.append(" ".join([w.lower() for w in movie_reviews.words(fid)
                           if not isStopWord(w.lower()) and words[w.lower()] > 1]))

# Create the vectorizer
vectorizer = TfidfVectorizer(stop_words='english')
matrix = vectorizer.fit_transform(texts)

# Sum each word's TF-IDF values over all documents
sums = np.array(matrix.sum(axis=0)).ravel()

# Rank the words by their weights
# (itertools.izip is Python 2 only; in Python 3 use the built-in zip)
ranks = []
for word, val in zip(vectorizer.get_feature_names(), sums):
    ranks.append((word, val))

# Create a DataFrame and sort it
# (DataFrame.sort(columns=...) was removed; current pandas uses sort_values)
df = pd.DataFrame(ranks, columns=['term', 'tfidf'])
df = df.sort_values(by='tfidf')

# Print the lowest-ranked values
print(df.head())

# Print the top 1% highest-ranked values
N = int(.01 * len(df))
df = df.tail(N)
for term, tfidf in zip(df['term'].values, df['tfidf'].values):
    print(term, ":", tfidf)
Output:
term tfidf
19963 superintendent 0.03035
8736 greys 0.03035
14010 ology 0.03035
2406 briefer 0.03035
2791 cannibalize 0.03035
matter : 10.1601563202
review : 10.1621092081
...
jokes : 10.1950553877
8 Social Network Analysis
Installing NetworkX (omitted)
Social network analysis uses network theory to study social relationships.
Sample code:
import matplotlib.pyplot as plt
import networkx as nx

# The sample graphs that NetworkX provides
print([s for s in dir(nx) if s.endswith("graph")])

G = nx.davis_southern_women_graph()

# Histogram of node degrees (dict() makes this work on both the old
# dict return value and the newer DegreeView)
plt.figure(1)
plt.hist(list(dict(nx.degree(G)).values()))

# Draw the graph with labels; pass the same layout to both calls
# so the labels line up with the nodes
plt.figure(2)
pos = nx.spring_layout(G)
nx.draw(G, pos, node_size=9)
nx.draw_networkx_labels(G, pos)
plt.show()
Output:
['LCF_graph', 'barabasi_albert_graph', 'barbell_graph', 'binomial_graph', 'bull_graph', 'caveman_graph', 'chordal_cycle_graph', 'chvatal_graph', 'circulant_graph', 'circular_ladder_graph', 'complete_bipartite_graph', 'complete_graph', 'complete_multipartite_graph', 'connected_caveman_graph', 'connected_watts_strogatz_graph', 'cubical_graph', 'cycle_graph', 'davis_southern_women_graph', 'dense_gnm_random_graph', 'desargues_graph', 'diamond_graph', 'digraph', 'directed_havel_hakimi_graph', 'dodecahedral_graph', 'dorogovtsev_goltsev_mendes_graph', 'duplication_divergence_graph', 'ego_graph', 'empty_graph', 'erdos_renyi_graph', 'expected_degree_graph', 'fast_gnp_random_graph', 'florentine_families_graph', 'frucht_graph', 'gaussian_random_partition_graph', 'general_random_intersection_graph', 'geographical_threshold_graph', 'gn_graph', 'gnc_graph', 'gnm_random_graph', 'gnp_random_graph', 'gnr_graph', 'graph', 'grid_2d_graph', 'grid_graph', 'havel_hakimi_graph', 'heawood_graph', 'house_graph', 'house_x_graph', 'hypercube_graph', 'icosahedral_graph', 'is_directed_acyclic_graph', 'k_random_intersection_graph', 'karate_club_graph', 'kl_connected_subgraph', 'krackhardt_kite_graph', 'ladder_graph', 'line_graph', 'lollipop_graph', 'make_max_clique_graph', 'make_small_graph', 'margulis_gabber_galil_graph', 'moebius_kantor_graph', 'multidigraph', 'multigraph', 'navigable_small_world_graph', 'newman_watts_strogatz_graph', 'null_graph', 'nx_agraph', 'octahedral_graph', 'pappus_graph', 'path_graph', 'petersen_graph', 'planted_partition_graph', 'powerlaw_cluster_graph', 'projected_graph', 'quotient_graph', 'random_clustered_graph', 'random_degree_sequence_graph', 'random_geometric_graph', 'random_partition_graph', 'random_regular_graph', 'random_shell_graph', 'relabel_gexf_graph', 'relaxed_caveman_graph', 'scale_free_graph', 'sedgewick_maze_graph', 'star_graph', 'stochastic_graph', 'subgraph', 'tetrahedral_graph', 'to_networkx_graph', 'trivial_graph', 'truncated_cube_graph', 'truncated_tetrahedron_graph', 'tutte_graph', 'uniform_random_intersection_graph', 'watts_strogatz_graph', 'waxman_graph', 'wheel_graph']