multiprocessing Pool.map python多进程提取处理大量文本的关键词

来源:互联网 发布:新海诚风格知乎 编辑:程序博客网 时间:2024/06/05 20:02


# NOTE(review): Python 2 legacy — reload(sys) + setdefaultencoding is a known
# anti-pattern and does not exist on Python 3; remove when porting to py3.
import sys  
reload(sys)  
sys.setdefaultencoding("utf-8")  
# Pool is used below; Queue/Process are imported but unused in the visible code.
from multiprocessing import Pool,Queue,Process  
import multiprocessing as mp   
import time,random  
import os  
import codecs  
import jieba.analyse  
# Load a custom stop-word file so jieba's extract_tags filters those words out.
# Assumes "yy_stop_words.txt" exists in the working directory — TODO confirm.
jieba.analyse.set_stop_words("yy_stop_words.txt")  
def extract_keyword(input_string, topK=100):
    """Extract the top-``topK`` keywords from ``input_string``.

    Uses jieba's TF-IDF based ``extract_tags``; the stop-word list is
    configured at module import time.

    :param input_string: text to extract keywords from
    :param topK: number of keywords to return (default 100, matching the
        previously hard-coded value, so existing callers are unaffected)
    :return: list of keyword strings, ranked by TF-IDF weight
    """
    tags = jieba.analyse.extract_tags(input_string, topK=topK)
    return tags
  
def parallel_extract_keyword(input_string, topK=100):
    """Extract the top-``topK`` keywords from ``input_string``.

    Module-level (picklable) worker so it can be passed to ``Pool.map``.

    :param input_string: text to extract keywords from
    :param topK: number of keywords to return (default 100, matching the
        previously hard-coded value, so existing callers are unaffected)
    :return: list of keyword strings, ranked by TF-IDF weight
    """
    tags = jieba.analyse.extract_tags(input_string, topK=topK)
    return tags
if __name__ == "__main__":
    # Usage: python <script> <data_file>
    # Benchmarks serial vs. parallel keyword extraction over the input lines,
    # then writes the parallel results to "<basename>_tags.txt".
    data_file = sys.argv[1]
    # Explicit encoding; the with-block closes the file (no manual close needed —
    # the original called f.close() redundantly inside the with block).
    with codecs.open(data_file, "r", encoding="utf-8") as f:
        lines = f.readlines()

    out_put = data_file.split('.')[0] + "_tags.txt"

    # --- Serial baseline ---
    t0 = time.time()
    for line in lines:
        parallel_extract_keyword(line)
    print("串行处理花费时间{t}".format(t=time.time()-t0))

    # --- Parallel run on ~70% of available cores ---
    # max(1, ...) guards against int(cpu_count()*0.7) == 0 on a single-core
    # machine, which would make Pool() raise ValueError.
    pool = Pool(processes=max(1, int(mp.cpu_count() * 0.7)))
    t1 = time.time()
    res = pool.map(parallel_extract_keyword, lines)
    pool.close()
    pool.join()
    print("并行处理花费时间{t}s".format(t=time.time()-t1))

    # Bug fix: the original computed `out_put` but never wrote the results.
    # One line of space-separated keywords per input line.
    with codecs.open(out_put, "w", encoding="utf-8") as fout:
        for tags in res:
            fout.write(" ".join(tags) + "\n")
原创粉丝点击