Text Preprocessing and Feature Extraction with Python

Text filtering
import re

# Keep only Chinese characters and Chinese punctuation
result = re.sub(r'[^\u4e00-\u9fa5,。?!,、;:“”‘’()《》〈〉]', "", content)

# Keep only Chinese characters
result = re.sub(r'[^\u4e00-\u9fa5]', "", content)

# Keep only Chinese characters, punctuation, and digits
result = re.sub(r'[^0-9.\u4e00-\u9fa5,。?!,、;:“”‘’()《》〈〉]', "", content)

# Keep only Chinese characters, English letters, and digits
result = re.sub(r'[^\u4e00-\u9fa5,A-Za-z0-9]', "", content)
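
A minimal sketch of the Chinese-only filter, using a made-up mixed-language string:

import re

content = "Python文本处理demo:保留中文,丢弃其他!123 abc"
print(re.sub(r'[^\u4e00-\u9fa5]', "", content))
# 文本处理保留中文丢弃其他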

Removing runs of two or more whitespace characters

# Delete any run of two or more whitespace characters (spaces, tabs, newlines)
content = re.sub(r'\s{2,}', '', content)
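
Note that this deletes each matched run outright; replacing with ' ' instead would collapse the run into a single space. A quick sketch with a made-up sample string:

import re

content = "第一段   第二段\n\n第三段"
print(re.sub(r'\s{2,}', '', content))   # 第一段第二段第三段
print(re.sub(r'\s{2,}', ' ', content))  # 第一段 第二段 第三段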

Decoding Base64 back into Chinese text

import base64
import re

def bas4_decode(bas4_content):
    decodestr = base64.b64decode(bas4_content)
    # Keep only Chinese characters, punctuation, and digits
    result = re.sub(r'[^0-9.\u4e00-\u9fa5,。?!,、;:“”‘’()《》〈〉]', "", decodestr.decode())
    return result
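
A round-trip sketch (the sample string is made up; b64decode accepts the ASCII string produced by b64encode, and decode() assumes UTF-8 content):

encoded = base64.b64encode("你好,世界!2024".encode()).decode()
print(bas4_decode(encoded))  # 你好,世界!2024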

Removing stopwords from text

import codecs
import re

import jieba

def text_to_wordlist(text):
    result = re.sub(r'[^\u4e00-\u9fa5]', "", text)
    # A user dictionary can be loaded here to cover words missing from jieba's
    # default dictionary and improve segmentation accuracy
    f1_seg_list = jieba.cut(result)
    f_stop = codecs.open("stopword.txt", "r", "utf-8")
    try:
        f_stop_text = f_stop.read()
    finally:
        f_stop.close()
    f_stop_seg_list = f_stop_text.split()
    test_words = []
    for myword in f1_seg_list:
        if myword not in f_stop_seg_list:
            test_words.append(myword)
    return test_words
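
A usage sketch, assuming a stopword.txt file (one stopword per line, here containing 我们) sits in the working directory; the exact split depends on jieba's dictionary:

print(text_to_wordlist("我们今天学习文本预处理!"))
# e.g. ['今天', '学习', '文本', '预处理']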


Text feature extraction

import re

import jieba
import jieba.analyse

def Textrank(content):
    result = re.sub(r'[^\u4e00-\u9fa5]', "", content)
    seg = jieba.cut(result)
    jieba.analyse.set_stop_words('stopword.txt')
    keyList = jieba.analyse.textrank('|'.join(seg), topK=10, withWeight=False)
    return keyList

def TF_IDF(content):
    result = re.sub(r'[^\u4e00-\u9fa5]', "", content)
    seg = jieba.cut(result)
    jieba.analyse.set_stop_words('stopword.txt')
    # Keyword extraction; the author notes having modified jieba's tfidf.py here
    keyWord = jieba.analyse.extract_tags(
        '|'.join(seg), topK=10, withWeight=False, allowPOS=())
    return keyWord
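
A usage sketch for both extractors (the sample sentence is made up; results depend on jieba's built-in models and the stopword list):

doc = "自然语言处理是人工智能的重要方向,文本特征提取是其中的基础步骤"
print(Textrank(doc))  # up to 10 TextRank keywords
print(TF_IDF(doc))    # up to 10 TF-IDF keywords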



