Text Mining with R

Internet companies usually have a quality-audit department whose job is to find prohibited posts. But a site carries tens of thousands of posts, so how can the prohibited ones be found quickly? One approach is to solve the problem with a data model (a classification algorithm): analyze the post text, build a model on it, and flag the posts that are likely to be prohibited.
setwd("~/text_ming")load("~/text_ming/doc_CN.rda")library(rJava)library(Rwordseg)segmentCN('车试试',returnType = 'tm')library(tm)library(SnowballC)setwd("~/text_ming")ac <- read.csv('ad_sample_10w.csv',stringsAsFactors = F)ac <- read.table(file="ad_sample_10w1.txt",colClasses="character",header=T,sep=",")ac1 <- subset(ac, t2.dt >0)str(ac1)names(ac1) <- c('ad_id','spam','content','title','category','dt')str(ac1)doc <- ac1$content#进行中文的分词活动doc_CN=list()for(j in 1:length(doc)){  doc[j]=gsub("/","",doc[j])  doc_CN[[j]]=c(segmentCN(doc[j],returnType = 'tm'))}save(doc_CN,file='doc_CN.rda')load('doc_CN.rda')# Create corpuscorpus = Corpus(VectorSource(doc_CN))# Look at corpuscorpuscorpus[[10000]]#这里存一下corpus,这样就不用每次配置rjava和Rwordseg这么麻烦了#save(corpus,file='corpus.rda')corpus = tm_map(corpus, removePunctuation) ###########停用词###########data_stw=read.table(file="chinese_stop_words.txt",colClasses="character")head(data_stw)stopwords_CN=c(NULL)for(i in 1:dim(data_stw)[1]){  stopwords_CN=c(stopwords_CN,data_stw[i,1])}stopwords_CN = unique(stopwords_CN)#save(stopwords_CN,file='stopwords_CN.rda')#str(stopwords_CN)doc.corpus=tm_map(corpus,removeWords,stopwords_CN) # 删除停用词,该死的tm又出bug了。所以这个没有用。doc.corpus[[1]]#############################删除数字doc.corpus=tm_map(corpus,removeNumbers)doc.corpus[[10000]]#########################################创建词项-文档矩阵(TDM)################ 创建词项-文档矩阵(TDM) 网上方法,不是很好control=list(removePunctuation=T,minDocFreq=5,wordLengths = c(1, Inf),weighting = weightTfIdf,stopwords =stopwords_CN)doc.tdm=TermDocumentMatrix(doc.corpus,control)length(doc.tdm$dimnames$Terms)tdm_removed=removeSparseTerms(doc.tdm, 0.97) # 1-去除了低于 99.98% 的稀疏条目项length(tdm_removed$dimnames$Terms)findFreqTerms(tdm_removed,10,1000)####################Create matrix#################################################这里试一试课上的方法。# Create matrixfrequencies = DocumentTermMatrix(doc.corpus)#frequencies = DocumentTermMatrix(corpus)frequencies# Look at matrix inspect(frequencies[1000:1005,505:515])# Check for sparsityfindFreqTerms(frequencies, lowfreq=10)# Remove sparse termssparse = removeSparseTerms(frequencies, 0.995)####这里我改了一下sparse =tdm_removed# Convert to a data framespamSparse = as.data.frame(t(as.matrix(sparse)))head(spamSparse)# Make all variable names R-friendlycolnames(spamSparse) = make.names(colnames(spamSparse))# Add dependent variablestr(ac1)length(ac1[,1])spamSparse$spam = ac1$spam[1:44603]head(spamSparse)summary(spamSparse)# Split the datalibrary(caTools)set.seed(123)split = sample.split(spamSparse$spam, SplitRatio = 0.7)trainSparse = subset(spamSparse, split==TRUE)testSparse = subset(spamSparse, split==FALSE)# Video 7# Build a CART modellibrary(rpart)library(rpart.plot)tweetCART = rpart(spam ~ ., data=trainSparse, method="class")prp(tweetCART)# Evaluate the performance of the modelpredictCART = predict(tweetCART, newdata=testSparse, type="class")table(testSparse$spam, predictCART)(8224+11554)/nrow(testSparse)# Compute accuracy# Baseline accuracy table(testSparse$spam)13596 /nrow(testSparse)#glmhead(trainSparse)trainSparse$spam=as.numeric(trainSparse$spam)typeof(trainSparse$spam)glm <- glm(spam ~ ., data=trainSparse,family='binomial')glm <- lm(spam ~ ., data=trainSparse)#test resultpred <- predict(glm,  newdata=testSparse, type='response')#plot roclibrary(ROCR)ROCRpred = prediction(pred, testSparse$spam)as.numeric(performance(ROCRpred, "auc")@y.values)perf = performance(ROCRpred, "tpr", "fpr")plot(perf,col='blue',add=T)

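The CART accuracy above is computed from hard-coded cell counts (8224, 11554, 13596), which only match one particular run of the split. A small sketch, assuming the `testSparse` and `predictCART` objects from the script, computes the same figures programmatically:

# Confusion matrix: actual labels vs. CART predictions
cm <- table(actual = testSparse$spam, predicted = predictCART)
sum(diag(cm)) / sum(cm)                         # CART accuracy
max(table(testSparse$spam)) / nrow(testSparse)  # baseline: always predict the majority class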

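As flagged in the script, the lm() call overwrites the logistic fit, so the ROC curve at the end actually scores the linear model. If the logistic model is the one of interest, a sketch along these lines (same `trainSparse`/`testSparse` objects as above) keeps it and reports its AUC:

library(ROCR)
logit <- glm(spam ~ ., data = trainSparse, family = binomial)
predLogit <- predict(logit, newdata = testSparse, type = "response")
rocrLogit <- prediction(predLogit, testSparse$spam)
as.numeric(performance(rocrLogit, "auc")@y.values)    # AUC of the logistic model
plot(performance(rocrLogit, "tpr", "fpr"), col = "red")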