Python machine learning -- text classification notes

# 1. Data preparation
import pandas as pda
import numpy as npy

filename = ""
dataf = pda.read_csv(filename)
x = dataf.iloc[:, 1:4].values   # feature columns (as_matrix() is deprecated; use .values)
y = dataf.iloc[:, 0].values     # label column as a 1-D array

# 2. Data normalization
from sklearn import preprocessing
# Normalization: rescale each sample (row) to unit norm
nx = preprocessing.normalize(x)
# Standardization: subtract the mean and divide by the standard deviation,
# so values are centered around 0 with unit variance
sx = preprocessing.scale(x)

# Feature selection
from sklearn.ensemble import ExtraTreesClassifier
model = ExtraTreesClassifier()
model.fit(x, y)
# print(model.feature_importances_)

# Common algorithms -- k-nearest neighbors
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier()
model.fit(x, y)
# new samples must have the same 3 features as x
x2 = npy.array([[800, 3, 50], [372, 3.71, 2]])
# print(model.predict(x2))

# Model evaluation
from sklearn import metrics
# Classification report
expected = y
predicted = model.predict(x)
print(metrics.classification_report(expected, predicted))
'''
precision
Suppose the target labels are 0 and 1. Let a be the number of 1s in the true labels,
b the number of times 1 is predicted, and c the number of correct predictions of 1.
precision = c / b
recall = c / a
f1-score = 2 * precision * recall / (precision + recall)
support: the number of samples of each class
'''
# Confusion matrix
# print(metrics.confusion_matrix(expected, predicted))

# Naive Bayes
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(x, y)
predicted = model.predict(x)
# print(model.predict(x))

# Logistic regression
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(x, y)
predicted = model.predict(x)
# print(model.predict(x))

# Decision tree
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
model.fit(x, y)
predicted = model.predict(x)
# print(model.predict(x))

# Support vector machine
from sklearn.svm import SVC
model = SVC()
model.fit(x, y)
predicted = model.predict(x)
# print(model.predict(x))

# English text classification
# Text data preparation
from sklearn.datasets import fetch_20newsgroups
categories = ['comp.graphics', 'alt.atheism', 'sci.med']
train_text = fetch_20newsgroups(subset="train", categories=categories,
                                shuffle=True, random_state=40)
# print(train_text.data[0])
print(train_text.data)

# Text feature extraction (term counts)
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer()
train_x_counts = count_vect.fit_transform(train_text.data)

# tf-idf model
from sklearn.feature_extraction.text import TfidfTransformer
tf_ts = TfidfTransformer(use_idf=False).fit(train_x_counts)
train_x_tf = tf_ts.transform(train_x_counts)

# Training
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB().fit(train_x_tf, train_text.target)

# Prediction on new text
new_text = ["I like reading books", "computer development technology"]
new_x_counts = count_vect.transform(new_text)
new_x_tfidf = tf_ts.transform(new_x_counts)
rst = clf.predict(new_x_tfidf)
print(rst)

# Chinese text classification
import os
import jieba

def loaddata(path, class1):
    # Read every file in the directory, segment it with jieba,
    # join the tokens with spaces, and assign the label class1
    allfile = os.listdir(path)
    textdata = []
    classall = []
    for thisfile in allfile:
        data = open(path + "/" + thisfile, "r", encoding="gbk").read()
        data1 = jieba.cut(data)
        data11 = ""
        for item in data1:
            data11 += item + " "
        textdata.append(data11)
        classall.append(class1)
    return textdata, classall

text1, class1 = loaddata("", 0)
text2, class2 = loaddata("", 1)
train_text = text1 + text2
classall = class1 + class2

count_vect = CountVectorizer()
train_x_counts = count_vect.fit_transform(train_text)

from sklearn.feature_extraction.text import TfidfTransformer
tf_ts = TfidfTransformer(use_idf=False).fit(train_x_counts)
train_x_tf = tf_ts.transform(train_x_counts)

from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB().fit(train_x_tf, classall)

new_text = ["房间 有鬼", "爱情"]
new_x_counts = count_vect.transform(new_text)
new_x_tfidf = tf_ts.transform(new_x_counts)
rst = clf.predict(new_x_tfidf)
print(rst)
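As a quick sanity check on the formulas in the model-evaluation notes above (precision = c/b, recall = c/a, f1 = 2*precision*recall/(precision+recall)), here is a minimal self-contained sketch that computes them by hand and compares against sklearn's classification_report. The label arrays y_true and y_pred are made up purely for illustration and are not from the dataset used in these notes.

# Minimal sketch: verify the precision/recall/f1 formulas from the notes
# against sklearn's own metrics. y_true / y_pred are invented example labels.
from sklearn import metrics

y_true = [1, 0, 1, 1, 0, 1, 0, 0]   # a = 4 ones in the ground truth
y_pred = [1, 0, 0, 1, 1, 1, 0, 0]   # b = 4 ones predicted, c = 3 of them correct

a = sum(1 for t in y_true if t == 1)                       # actual positives
b = sum(1 for p in y_pred if p == 1)                       # predicted positives
c = sum(1 for t, p in zip(y_true, y_pred) if t == p == 1)  # true positives

precision = c / b                                   # 3/4 = 0.75
recall = c / a                                      # 3/4 = 0.75
f1 = 2 * precision * recall / (precision + recall)  # 0.75

print(precision, recall, f1)
# The class-1 row of the report should show the same three values
print(metrics.classification_report(y_true, y_pred))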