Scraping Juhuasuan (聚劃算) product pages with Python and saving the product information locally as XML
This article shares the full code for a Python spider that scrapes Juhuasuan product pages, extracts the product information, and saves it locally, for your reference. The details are as follows:
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Spider.py

import urllib2
import httplib
import StringIO
import gzip
import re
import chardet
import sys
import os
import datetime
from xml.dom.minidom import Document
from BeautifulSoup import BeautifulSoup

## Work around UnicodeEncodeError when printing non-ASCII text to the console
reload(sys)
sys.setdefaultencoding("utf8")

#####################################################
## Debug switch: when on, HTTP request headers and debug logs are printed
DEBUG = 1
NO_DEBUG = 0
httplib.HTTPConnection.debuglevel = DEBUG
## Whether to print the scraped page source
showSrcCode = False
## Compression scheme accepted from the server
ZIP_TYPE = "gzip"

## default output file name and output directory
fileName = "auctions"
location = "d:/spiderData/"

## request headers
headerConfig = {"User-Agent": "taobao-yanyuan.qzs", "Accept-encoding": ZIP_TYPE}
#####################################################

#############class SpiderConfig #####################
class SpiderConfig:
    """
    Configuration for a spider: a name and the URL to crawl.
    """
    def __init__(self, name, url):
        self.name = name
        self.url = url
#####################################################

##############class SpiderAuctionDomain##############
class SpiderAuctionDomain:
    """
    Holds the information scraped for one auction item.
    """
    title = ""
    link = ""   # was "url" in the original, but the parser assigns .link
    img = ""
    price = ""

    def __init__(self):
        pass
#####################################################

########class SpiderDefaultErrorHandler##############
class SpiderDefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
    def http_error_default(self, req, fp, code, msg, hdrs):
        """
        Default HTTP error handler for the spider.
        """
        result = urllib2.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
        result.status = code
        result.url = req.get_full_url()
        print "<", result.url, "Exception code :", result.status, ">"
        return result
#####################################################

#############class SpiderHandler#####################
class SpiderHandler:
    """
    Fetches a page, parses the auctions out of it, and saves them as XML.
    """

    def spider(self, spiderConfig):
        try:
            request = urllib2.Request(spiderConfig.url)

            ## configure request headers
            for key, val in headerConfig.items():
                request.add_header(key, val)

            ## build an opener with our error handler, then fetch and read
            opener = urllib2.build_opener(SpiderDefaultErrorHandler())
            openRequest = opener.open(request)
            spiderData = openRequest.read()
            opener.close()

            if 0 == len(spiderData):
                return

            ## decompress if the server answered with gzip
            if ZIP_TYPE == openRequest.headers.get("Content-Encoding"):
                spiderData = self.gzipData(spiderData)

            if httplib.HTTPConnection.debuglevel == DEBUG and showSrcCode:
                print spiderData

            # parse the HTML
            self.parse(spiderData)

        except Exception as x:
            print "spider process Exception:", x

    def parse(self, spiderData):
        """
        Parse the HTML content.
        """
        if httplib.HTTPConnection.debuglevel == DEBUG:
            charsetAnalyze = chardet.detect(spiderData)
            print "analyze spider data encode :", charsetAnalyze["encoding"]

        print "parsing", fileName

        soup = BeautifulSoup(spiderData)
        encode = soup.originalEncoding
        encoding = lambda x: x.encode(encode)

        if httplib.HTTPConnection.debuglevel == DEBUG:
            print "detected encoding:", encode
            title = soup.head.title.string
            print encoding(title)

        spiderContents = soup.findAll(name="div", attrs={"class": "main-box avil"})
        auctions = ["%s" % s for s in spiderContents]

        # findAll returns a list, so test for emptiness
        # (the original tested "is None", which never triggers)
        if not auctions:
            return

        auctionList = []

        for auc in auctions:
            auctionDomain = SpiderAuctionDomain()

            # parse the auction link
            links = re.search(re.compile(r'<a href=["\']http://ju.taobao.com/tg/life_home.htm\?item_id=([^>]*)["\']', re.IGNORECASE), auc)
            if links is not None:
                auctionDomain.link = encoding("http://ju.taobao.com/tg/life_home.htm?item_id=" + "".join(["%s" % s for s in links.groups() if len(s) > 0]))

            # parse the auction title
            titles = re.search(re.compile(r"([^>]*)</a></h2>", re.IGNORECASE), auc)
            if titles is not None:
                auctionDomain.title = encoding("".join(["%s" % t for t in titles.groups() if len(t) > 0]))

            # parse the auction price
            price = re.search(re.compile(r"<strong class=\"J_juPrices\".*</b>([^<]*)</strong>", re.IGNORECASE), auc)
            if price is not None:
                auctionDomain.price = "".join(["%s" % p for p in price.groups() if len(p) > 0])

            # parse the image URL
            imgs = re.search(re.compile(r"<img src=['\"]([^>]*)['\"]", re.IGNORECASE), auc)
            if imgs is not None:
                auctionDomain.img = "".join(["%s" % i for i in imgs.groups() if len(i) > 0])

            auctionList.append(auctionDomain)

        print "parsed auction info successfully:"
        for a in auctionList:
            print "--->", a.title

        # sort the auction list by price
        auctionList = self.sortAuctionList(auctionList)

        # save to file
        self.save(auctionList)

        print "parsing finished"

    def sortAuctionList(self, auctionList):
        """
        Bubble sort the auctions by price, ascending.
        """
        length = len(auctionList)
        if length < 2:
            return auctionList
        for i in range(length - 1):
            for j in range(length - i - 1):
                if float(auctionList[j].price) > float(auctionList[j + 1].price):
                    auctionList[j], auctionList[j + 1] = auctionList[j + 1], auctionList[j]
        return auctionList

    def save(self, auctionList):
        if auctionList is not None:
            doc = Document()
            auctions = doc.createElement("auctions")
            doc.appendChild(auctions)

            for auc in auctionList:
                auction = doc.createElement("auction")
                auctions.appendChild(auction)
                self.generateXML(doc, auction, "title", auc.title)
                self.generateXML(doc, auction, "price", auc.price)
                self.generateXML(doc, auction, "img", auc.img)
                self.generateXML(doc, auction, "link", auc.link)

            if not os.path.exists(location):
                os.mkdir(location)

            xmlFile = open(location + fileName + ".xml", 'w')
            xmlFile.write(doc.toprettyxml())
            xmlFile.close()

            if httplib.HTTPConnection.debuglevel == DEBUG:
                print doc.toprettyxml()

    def generateXML(self, doc, parent, name, txt):
        c = doc.createElement(name)
        parent.appendChild(c)
        c.appendChild(doc.createTextNode(txt))

    def gzipData(self, spiderData):
        """
        Decompress a gzip-encoded response body.
        """
        if 0 == len(spiderData):
            return spiderData
        spiderDataStream = StringIO.StringIO(spiderData)
        spiderData = gzip.GzipFile(fileobj=spiderDataStream).read()
        return spiderData
#####################################################

if __name__ == "__main__":
    # timestamp helper (the original format string used %m, the month,
    # where minutes %M were intended)
    nowtime = lambda: datetime.datetime.strftime(datetime.datetime.now(), "%Y-%m-%d %H:%M:%S")

    needSpiderUrl = {"suzhou": "http://ju.taobao.com/suzhou",
                     "hangzhou": "http://ju.taobao.com/hangzhou",
                     "shanghai": "http://ju.taobao.com/shanghai",
                     "beijing": "http://ju.taobao.com/beijing",
                     "chengdu": "http://ju.taobao.com/chengdu"}

    configList = []
    for k, v in needSpiderUrl.items():
        spiderConfig = SpiderConfig(k, v)
        configList.append(spiderConfig)

    spiderHandler = SpiderHandler()

    print "spider started at:", nowtime()
    for spiderConfig in configList:
        fileName = spiderConfig.name
        spiderHandler.spider(spiderConfig)
    print "spider finished at:", nowtime()
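When the script runs successfully it leaves one <city>.xml file per configured city under location (d:/spiderData/ above), each holding an <auctions> root whose <auction> children carry <title>, <price>, <img> and <link> elements. Note that the script is Python 2 only: urllib2, httplib, StringIO and the BeautifulSoup 3 import no longer exist on Python 3, and the Juhuasuan city pages it targets are long gone. For readers on a modern interpreter, here is a minimal sketch of the same fetch, decompress, extract and save-as-XML flow using only the Python 3 standard library. The URL is the stale one from the article and the extraction regex is a hypothetical placeholder, so treat this as a template rather than a working scraper.

# Minimal Python 3 sketch of the same flow. The target URL and the
# regular expression are illustrative assumptions, not working values.
import gzip
import io
import re
import urllib.request
from xml.dom.minidom import Document

def fetch(url):
    # Request the page, advertising gzip support, and decompress if needed.
    req = urllib.request.Request(url, headers={"Accept-Encoding": "gzip"})
    with urllib.request.urlopen(req) as resp:
        data = resp.read()
        if resp.headers.get("Content-Encoding") == "gzip":
            data = gzip.GzipFile(fileobj=io.BytesIO(data)).read()
    return data.decode("utf-8", errors="replace")

def save_xml(items, path):
    # Write a list of dicts as <auctions><auction><key>text</key>...</auction>...
    doc = Document()
    root = doc.createElement("auctions")
    doc.appendChild(root)
    for item in items:
        node = doc.createElement("auction")
        root.appendChild(node)
        for key, val in item.items():
            child = doc.createElement(key)
            child.appendChild(doc.createTextNode(val))
            node.appendChild(child)
    with open(path, "w", encoding="utf-8") as f:
        f.write(doc.toprettyxml())

html = fetch("http://ju.taobao.com/hangzhou")  # stale URL kept from the article
# Hypothetical extraction: pair every anchor's href with its text.
items = [{"link": href, "title": text}
         for href, text in re.findall(r'<a href="([^"]+)"[^>]*>([^<]+)</a>', html)]
save_xml(items, "auctions.xml")

On today's web you would normally reach for requests and a maintained HTML parser instead of regexes, but the sketch keeps to the standard library to stay close to the structure of the original script.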
For more material, see the site topic 《python爬取功能匯總》, a roundup of Python scraping articles.
That is all for this article. We hope it helps with your studies, and we hope you will keep supporting 腳本之家.