Errors encountered when parsing a string into JSON


The JSON grammar (specified alongside ECMAScript) defines exactly which characters may be escaped inside a JSON string: \" \\ \/ \b \f \n \r \t, plus \uXXXX for arbitrary code points. Any other backslash sequence is invalid JSON.
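For reference, a small self-contained check of these escape rules with Python's json module (illustrative only, not part of the original script):

import json

# JSON allows only these escapes inside strings: \" \\ \/ \b \f \n \r \t \uXXXX
print(json.loads(r'{"path": "C:\\temp", "note": "line1\nline2"}'))
# -> {'path': 'C:\\temp', 'note': 'line1\nline2'}

# Any other backslash sequence raises json.JSONDecodeError, e.g.:
# json.loads(r'{"bad": "\q"}')   # Invalid \escape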


Problems encountered and their fixes:


1. The byte 0xae (decimal 174, the registered-trademark sign) appears. It is not ASCII, so strip it out by replacement:

s = str(html).replace("\\xae", "")  # convert the bytes to str and strip the character with code 174
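Why \xae shows up at all: webpage.read() returns bytes, and calling str() on bytes produces the repr text b'...', in which the non-ASCII byte is rendered as the four characters \xae. JSON does not define \x escapes (only \uXXXX), so json.loads rejects it. A minimal illustration with an assumed payload:

import json

raw = b'{"brand": "Monroe\xae"}'    # assumed payload containing the 0xae byte
s = str(raw)                        # the byte now appears as the literal text \xae inside b'...'
s = s[2:-1]                         # strip the b'...' wrapper (the script below uses a regex for this)
# json.loads(s) would fail here: \xae is not a legal JSON escape
s = s.replace("\\xae", "")          # drop the escaped byte, as in the fix above
print(json.loads(s))                # {'brand': 'Monroe'}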

2. \\ appears where the JSON should contain a single \; collapse the doubled backslash:

s = s.replace("\\\\", "\\")  # turn \\ into \
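The doubled backslashes come from the same str()-on-bytes step: any real backslash in the JSON (for example an escaped quote \") is shown doubled in the repr text, which then breaks json.loads. A small illustration with an assumed payload:

import json, re

raw = b'{"title": "6\\" shock absorber"}'   # bytes hold valid JSON: {"title": "6\" shock absorber"}
s = re.findall(r"^b'(.*)'", str(raw))[0]    # repr doubled the backslash: {"title": "6\\" shock absorber"}
# json.loads(s) fails here: "6\\" ends the string early
s = s.replace("\\\\", "\\")                 # collapse \\ back to a single \
print(json.loads(s))                        # {'title': '6" shock absorber'}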

3. \' appears; replace it with ':

s = s.replace("\\'", "'")  # turn \' into '
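Similarly for \': JSON never escapes single quotes, but when the bytes contain both " and ', Python's bytes repr keeps '...' as the delimiter and escapes the inner quote as \', which is not a legal JSON escape. Another illustration with an assumed payload:

import json, re

raw = b'{"note": "driver\'s side"}'        # bytes hold valid JSON: {"note": "driver's side"}
s = re.findall(r"^b'(.*)'", str(raw))[0]   # repr escaped the quote: {"note": "driver\'s side"}
# json.loads(s) fails here: \' is not defined by JSON
s = s.replace("\\'", "'")                  # turn \' back into '
print(json.loads(s))                       # {'note': "driver's side"}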



Code

from urllib.request import *
import re
import json
import urllib.parse
import urllib.request
import random
from bs4 import BeautifulSoup


def randHeader():
    head_connection = ['Keep-Alive', 'close']
    head_accept = ['text/html, application/xhtml+xml, */*']
    head_accept_language = ['zh-CN,fr-FR;q=0.5', 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3']
    head_user_agent = ['Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
                       'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36',
                       'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; rv:11.0) like Gecko)',
                       'Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1',
                       'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3',
                       'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12',
                       'Opera/9.27 (Windows NT 5.2; U; zh-cn)',
                       'Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0',
                       'Opera/8.0 (Macintosh; PPC Mac OS X; U; en)',
                       'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6',
                       'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Win64; x64; Trident/4.0)',
                       'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)',
                       'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E)',
                       'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Maxthon/4.0.6.2000 Chrome/26.0.1410.43 Safari/537.1 ',
                       'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E; QQBrowser/7.3.9825.400)',
                       'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0 ',
                       'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.92 Safari/537.1 LBBROWSER',
                       'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; BIDUBrowser 2.x)',
                       'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/3.0 Safari/536.11']
    header = {
        'Connection': head_connection[0],
        'Accept': head_accept[0],
        'Accept-Language': head_accept_language[1],
        'User-Agent': head_user_agent[random.randrange(0, len(head_user_agent))]
    }
    return header


# url = 'http://www.ebay.com/itm/152586216774'
# req = urllib.request.Request(url=url, headers=randHeader())
# webpage = urllib.request.urlopen(req)
# html = webpage.read()
# soup = BeautifulSoup(html, 'html.parser')
# print(soup)


def getdesc(ebayno):
    url = "http://vi.vipr.ebaydesc.com/ws/eBayISAPI.dll?ViewItemDescV4&item=" + ebayno
    req = urllib.request.Request(url=url, headers=randHeader())
    webpage = urllib.request.urlopen(req)
    html = webpage.read()
    soup = BeautifulSoup(html, 'html.parser')  # parse the page
    print(soup.prettify())
    content = soup.find_all("div", class_="descriptionbox")[0]  # select the description box
    p = content.find_all("span")  # select the span sections
    # print(p[0].strong.string)  # p[0] is the "Description" heading
    content = p[1]  # p[1] is the body text
    pp = content.p.prettify()  # pretty-print the body
    pp = BeautifulSoup(pp, "html.parser")  # re-parse it
    # write the text to a txt file
    file_object = open(str(ebayno) + "description.txt", 'w')
    file_object.write("Description")
    for child in pp.strings:
        print(repr(child))
        file_object.write(child)
    file_object.close()  # close the file so the contents are flushed


# qs = [
#     "253013768959"
# ]
#
# for q in qs:
#     getdesc(q)

# url = 'http://catalog.monroe.com/catalogPart/partResults.do?&domain=monroe&locale=en&partNumber=171340L&selection=findPart'
# req = urllib.request.Request(url=url, headers=randHeader())
# webpage = urllib.request.urlopen(req)
# html = webpage.read().decode()
# # soup = BeautifulSoup(html, 'html.parser')
# print(html)
# sjson = re.findall("\[(.*?)\]", html)[0]
# sjson = json.loads(sjson)
# selectedPartId = sjson["value"]
# print(selectedPartId)
#
# url = 'http://catalog.monroe.com/catalog/catalogOptions.do?&domain=monroe&locale=en&selection=catalog'
# req = urllib.request.Request(url=url, headers=randHeader())
# webpage = urllib.request.urlopen(req)
# html = webpage.read().decode()
# # soup = BeautifulSoup(html, 'html.parser')
# print(html)
# sjson = re.findall("(.*?)", html)[0]
# sjson = json.loads(sjson)
# selectedCatalogId = sjson["value"]
# print(selectedCatalogId)

selectedPartId = "1904279"
selectedCatalogId = "974"
url = "http://catalog.monroe.com/catalogPart/partResults.do?&selectedCatalogId=" + str(selectedCatalogId) + "&selectedPartId=" + str(selectedPartId) + "&selection=partDetails"
# print(url)
req = urllib.request.Request(url=url, headers=randHeader())
webpage = urllib.request.urlopen(req)
html = webpage.read()  # raw bytes
print(html)
s = str(html).replace("\\xae", "")  # convert to str and strip the character with code 174
print(s)
s = re.findall("^b'(.*)'", s)[0]  # strip the b'...' wrapper
print(s)
s = s.replace("\\\\", "\\")  # turn \\ into \
s = s.replace("\\'", "'")  # turn \' into '
s = json.loads(s)
print(s)
att = s["part"]["attributes"]
print(att)
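All three replaces above are only needed because the script turns the response bytes into their repr with str(). As a hedged alternative (assuming the endpoint serves Latin-1 or UTF-8 encoded JSON), decoding the bytes directly sidesteps the b'...' wrapper and the escape juggling entirely; this sketch reuses the randHeader() helper defined above:

import json
import urllib.request

selectedPartId = "1904279"
selectedCatalogId = "974"
url = ("http://catalog.monroe.com/catalogPart/partResults.do?"
       "&selectedCatalogId=" + selectedCatalogId +
       "&selectedPartId=" + selectedPartId + "&selection=partDetails")

req = urllib.request.Request(url=url, headers=randHeader())
raw = urllib.request.urlopen(req).read()   # bytes

# latin-1 maps every byte to a character, so 0xae simply becomes '®' (assumed encoding)
text = raw.decode("latin-1")
data = json.loads(text)
print(data["part"]["attributes"])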


