python中文件的读写(含爬虫内容)

来源:互联网 发布:java中compareto 编辑:程序博客网 时间:2024/06/06 02:02
"""Scratch script recovered from a scraped blog post.

Three parts:
1. pandas file-reading examples (read_table / read_csv options),
2. scraping all <a href> links from a web page with lxml,
3. jieba word segmentation of three local CSV files, writing one row of
   tokens per input line to a result CSV.

Fixes over the original: NameError (`ruls` -> `urls`), Python-2 `print`
statement, unescaped backslashes in Windows paths (raw strings now),
unclosed file handles (context managers now), missing csv `newline=""`,
and a triplicated segmentation loop (now one helper).
"""
import csv
import urllib.request

import pandas as pd

# pandas parsing functions:
#   read_csv:       delimited data from file/URL/file-like; default sep ','
#   read_table:     same; default sep '\t'
#   read_fwf:       fixed-width columns (no delimiter)
#   read_clipboard: read_table over the clipboard (handy for pasted web tables)

# Raw string so every backslash in the Windows path is literal.
BASE = r"D:\Azhongmin2015\python_code"


def pandas_examples():
    """Demonstrate read_table / read_csv options on the local sample files."""
    # No header row in the file.
    df = pd.read_table(BASE + r"\code_public.txt", header=None)
    # Comma-separated, with explicit column names a..e.
    df = pd.read_table(BASE + r"\code_public.txt", sep=",", names=list("abcde"))
    # Hierarchical (MultiIndex) row index from two key columns.
    df2 = pd.read_csv(BASE + r"\test2.csv", index_col=["key1", "key2"])
    with open(BASE + r"\test1.txt") as f:
        print(list(f))  # show the raw lines
    # Regex r'\s+' treats any run of whitespace as one separator.
    df3 = pd.read_csv(BASE + r"\test1.txt", sep=r"\s+")
    # nrows=2: read only the first two rows.
    df3 = pd.read_csv(BASE + r"\test1.txt", sep=r"\s+", nrows=2)
    return df, df2, df3


def scrape_links(url="http://finance.yahoo.com/quote/AAPL/options?ltr=1"):
    """Print and return the href of every <a> element on the page at *url*.

    Python 3 note: urllib2.urlopen -> urllib.request.urlopen,
    urllib2.Request -> urllib.request.Request.
    """
    from lxml.html import parse  # local: third-party, only needed here

    doc = parse(urllib.request.urlopen(url)).getroot()
    # findall('.//a') yields lxml Element objects; use .get('href') for the
    # URL and .text_content() for the visible link text.
    urls = [a.get("href") for a in doc.findall(".//a")]
    for href in urls:  # original read `ruls` (NameError) + py2 `print`
        if href:
            print(href.strip())
    return urls


def segment_file(src, dst, n_lines=660):
    """Segment up to the first *n_lines* lines of *src* with jieba.

    Writes one CSV row of tokens per input line to *dst* and returns the
    list of token rows.
    """
    import jieba  # local: third-party, only needed here

    with open(src) as f:
        lines = f.read().split("\n")
    # Slice (not range(660) indexing) so shorter files don't IndexError.
    rows = [list(jieba.cut(line)) for line in lines[:n_lines]]
    # newline="" per the csv module docs, to avoid blank-line artifacts.
    with open(dst, "w", newline="") as out:
        csv.writer(out).writerows(rows)
    return rows


def main():
    pandas_examples()
    scrape_links()
    # 事件描述 (event description) tokens
    segment_file(BASE + r"\cotent_lili.csv", BASE + r"\result.csv")
    # 确认单 (confirmation slip) tokens
    segment_file(BASE + r"\confirm.csv", BASE + r"\result3.csv")
    # 处理意见 (handling advice) tokens
    segment_file(BASE + r"\advice.csv", BASE + r"\result2.csv")


if __name__ == "__main__":
    main()


0 0
原创粉丝点击