Python Basics Notes (2017-09-27)


1. Using pickle

pickle offers two pairs of functions: dump and load, which serialize to and restore from an open file, and dumps and loads, which do the same round trip with an in-memory bytes object.
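
The notes stop at naming the two pairs, so here is a minimal sketch of the difference; the file name data.pkl and the sample record are made up for illustration:

import pickle

record = {'name': 'Alex', 'scores': [90, 85]}  # sample data, not from the notes

# dump/load: serialize to and restore from an open binary file
with open('data.pkl', 'wb') as f:
    pickle.dump(record, f)
with open('data.pkl', 'rb') as f:
    restored = pickle.load(f)

# dumps/loads: the same round trip through a bytes object in memory
blob = pickle.dumps(record)
assert pickle.loads(blob) == restored == record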

2. List comprehensions

A list comprehension builds a new list by evaluating an expression over each item of an iterable:

squared = [x*x for x in range(4)]  # -> [0, 1, 4, 9]
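
An optional if clause filters the iterable as it is consumed; a small variation on the example above:

squared_evens = [x*x for x in range(10) if x % 2 == 0]  # -> [0, 4, 16, 36, 64]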

3. A small project: a jandan.net image downloader

import urllib.request
import os

def url_open(url):
    # Request the page with a browser-like User-Agent so the site
    # does not reject the script outright.
    req = urllib.request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.18 Safari/537.36 OPR/49.0.2720.0 (Edition developer)')
    # proxies = []  # placeholder for proxy IPs in the original, never wired up
    # Bug fix: open req (which carries the header), not the bare url.
    response = urllib.request.urlopen(req)
    return response.read()

def get_page(url):
    # The front page embeds the current page number as
    # current-comment-page">[NNNN] ; the offset +23 lands on the
    # first digit just past the opening bracket.
    html = url_open(url).decode('utf-8')
    a = html.find('current-comment-page') + 23
    b = html.find(']', a)
    return html[a:b]

def find_imgs(url):
    # Collect every img src=...jpg address by plain string scanning.
    html = url_open(url).decode('utf-8')
    img_addrs = []
    a = html.find('img src=')
    while a != -1:
        b = html.find('.jpg', a, a + 255)       # look at most 255 chars ahead
        if b != -1:
            img_addrs.append(html[a + 9:b + 4])  # +9 skips past the opening quote
        else:
            b = a + 9
        a = html.find('img src=', b)
    return img_addrs

def save_imgs(folder, img_addrs):
    # Name each file after the last path segment of its URL.
    for each in img_addrs:
        filename = each.split('/')[-1]
        with open(filename, 'wb') as f:
            f.write(url_open(each))

def download_mm(folder='new_test', pages=10):
    os.mkdir(folder)
    os.chdir(folder)
    url = 'http://jandan.net/pic/'
    page_num = int(get_page(url))
    for i in range(pages):
        # Bug fix: the original did page_num -= i, which skips pages
        # irregularly; walk backward one page at a time instead.
        page_url = url + 'page-' + str(page_num - i) + '#comments'
        img_addrs = find_imgs(page_url)
        save_imgs(folder, img_addrs)

if __name__ == '__main__':
    download_mm()
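
Two notes on the design: get_page scrapes the current page number from the literal current-comment-page marker in the front page's HTML, and download_mm then walks backward through the preceding pages from there. Because save_imgs names files after the last path segment of each URL, images that share a basename across pages silently overwrite one another.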

4. English-to-Chinese translation with Baidu Translate

import urllib.request
import urllib.parse
import json
import time

while True:
    print("-------- Welcome to the English-Chinese dictionary --------")
    content = input("Enter the text to translate (type 'Q!' to quit): ")
    if content == 'q!' or content == 'Q!':
        break
    url = "http://fanyi.baidu.com/v2transapi"

    # Approach 1: pass a headers dict straight to Request()
    head = {}
    head['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'

    # Form fields captured from a browser request to fanyi.baidu.com;
    # 'salt' and 'sign' are session-bound values and will eventually go stale.
    data = {}
    data['from'] = 'en'
    data['to'] = 'zh'
    data['query'] = content          # e.g. 'I love FishC.com'
    data['client'] = 'fanyideskweb'
    data['smartresult'] = 'dict'
    data['typoResult'] = 'true'
    data['doctype'] = 'json'
    data['version'] = '2.1'
    data['keyfrom'] = 'fanyi.web'
    data['salt'] = '1506959104653'
    data['sign'] = 'd71f3b0b47a5e188651757826d924ceb'
    data['action'] = 'FY_BY_CLICKBUTTION'
    data = urllib.parse.urlencode(data).encode('utf-8')

    req = urllib.request.Request(url, data, head)
    # Approach 2: build the Request first, then attach the header:
    # req = urllib.request.Request(url, data)
    # req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36')

    response = urllib.request.urlopen(req)
    html = response.read().decode('utf-8')
    target = json.loads(html)
    # An alternative field layout kept from the original notes:
    # print("Result: %s" % target['trans_result']['data'][0]['result'][0][1])
    print("Result: %s" % target['trans_result']['data'][0]['dst'])
    time.sleep(1)    # be polite: throttle requests
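
The #1 and #2 tags in the original marked the two interchangeable ways of attaching the User-Agent header; both are kept above, one live and one commented out. The response is plain JSON, and the translated sentence sits at target['trans_result']['data'][0]['dst']; the commented-out print targeted a different spot in the payload, presumably an earlier response shape.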

5. String formatting

# Sample values (not in the original notes) so the snippets run standalone
name, age, job, salary = 'Alex', 22, 'Engineer', 8000

# %-style formatting: purely positional, so name has to be passed twice
info = '''-------- info of %s -----
Name:%s
Age:%d
Job:%s
Salary:%s''' % (name, name, age, job, salary)

# str.format() with named placeholders -- the form used most often here
info2 = '''-------- info of {_name} -----
Name:{_name}
Age:{_age}
Job:{_job}
Salary:{_salary}'''.format(_name=name,
                           _age=age,
                           _job=job,
                           _salary=salary)

# str.format() with positional indices, which can be reused ({0} appears twice)
info3 = '''-------- info of {0} -----
Name:{0}
Age:{1}
Job:{2}
Salary:{3}'''.format(name, age, job, salary)
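
Since Python 3.6 the same report can also be written as an f-string; this variant is not in the original notes but reuses the sample variables defined above:

info4 = f'''-------- info of {name} -----
Name:{name}
Age:{age}
Job:{job}
Salary:{salary}'''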