Python 多線程抓取天涯帖子內(nèi)容示例
本文使用 re、urllib、threading 實(shí)現(xiàn)多線程抓取天涯帖子內(nèi)容:將 url 設(shè)置為需要抓取的天涯帖子第一頁的地址,將 file_name 設(shè)置為下載後保存的文件名。
#coding:utf-8
import urllib
import re
import threading
import os, time
class Down_Tianya(threading.Thread):
    """Worker thread that downloads and parses one page of a Tianya post.

    Each worker fetches ``self.url``, extracts the original poster's
    entries and stores the per-entry text list in a shared dict keyed
    by this worker's page number, so pages can be written out in order.
    """

    def __init__(self, url, num, dt):
        # url: page URL to fetch
        # num: 1-based page number, used as the key into the shared dict
        # dt:  shared dict collecting results across all worker threads
        threading.Thread.__init__(self)
        self.url = url
        self.num = num
        self.txt_dict = dt

    def run(self):
        # Fixed typo in the log message ('downling' -> 'downloading').
        print('downloading from %s' % self.url)
        self.down_text()

    def down_text(self):
        """Fetch the page and store its extracted text under self.num."""
        html_content = urllib.urlopen(self.url).read()
        # Capture (post time, post body) for the thread starter (樓主) only.
        # NOTE(review): pattern is tied to Tianya's markup of that era —
        # verify against the live page before reuse. Raw string avoids the
        # invalid '\s' escape warning on newer interpreters.
        text_pattern = re.compile(r'<span>時(shí)間:(.*?)</span>.*?<!-- <div class="host-ico">樓主</div> -->.*?<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)
        text = text_pattern.findall(html_content)
        # Join each (time, body) pair with blank lines for readability.
        text_join = ['\r\n\r\n\r\n\r\n'.join(item) for item in text]
        self.txt_dict[self.num] = text_join
def page(url):
    """Return the total page count parsed from the post's first page.

    Scrapes the pager widget: the page number immediately preceding the
    下頁 (next page) link is the last page. Returns None when the pager
    is not found (single-page post or changed markup).
    """
    html_page = urllib.urlopen(url).read()
    page_pattern = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" class="\S*?">下頁</a>')
    page_result = page_pattern.search(html_page)
    if page_result:
        return int(page_result.group(1))
    # NOTE(review): an explicit None here makes the fall-through visible;
    # main() would crash on range(1, None + 1) — callers should guard it.
    return None
def write_text(page_dict, fn):
    """Write the downloaded pages to file *fn* in page order.

    page_dict maps page number (1..N) to the list of text entries for
    that page. Entries are written sequentially with HTML line breaks
    converted to CRLF and &nbsp; entities stripped.

    Fixes: renamed the parameter so it no longer shadows the builtin
    ``dict``; the file is now closed via ``with`` even on error; the
    entity replace is restored to '&nbsp;' — the published source shows
    a bare space there, almost certainly a rendered HTML entity (a
    plain-space replace would also delete the spaces inside timestamps).
    """
    with open(fn, 'w+') as tx_file:
        for page_no in range(1, len(page_dict) + 1):
            for tx in page_dict[page_no]:
                # Normalise HTML breaks to CRLF and drop &nbsp; fillers.
                tx = tx.replace('<br>', '\r\n').replace('<br />', '\r\n').replace('&nbsp;', '')
                tx_file.write(tx.strip() + '\r\n' * 4)
def main():
    """Download every page of the target post concurrently, then save.

    Spawns one Down_Tianya worker per page, waits for all of them, and
    writes the collected text to file_name in the current directory.
    """
    url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    file_name = 'abc.txt'
    my_page = page(url)
    my_dict = {}
    print('page num is : %s' % my_page)
    threads = []
    # One worker per page: strip the trailing '1.shtml' (7 chars) and
    # append the page number (URL layout: ...post-16-996521-<n>.shtml).
    for num in range(1, my_page + 1):
        myurl = '%s%s.shtml' % (url[:-7], num)
        downlist = Down_Tianya(myurl, num, my_dict)
        downlist.start()
        threads.append(downlist)
    # Wait for every download to finish before writing, so that the
    # shared dict is complete and pages come out in order.
    for t in threads:
        t.join()
    write_text(my_dict, file_name)
    print('All download finished. Save file at directory: %s' % os.getcwd())


if __name__ == '__main__':
    main()
完整代碼(down_tianya.py)如下:
#coding:utf-8
import urllib
import re
import threading
import os
class Down_Tianya(threading.Thread):
    """Worker thread: downloads one page of a Tianya post and parses it.

    The worker fetches ``self.url``, pulls out the thread starter's
    entries, and records the resulting text list in a dict shared by
    all workers, keyed by this worker's page number.
    """

    def __init__(self, url, num, dt):
        # url: page URL to download
        # num: 1-based page number — key into the shared results dict
        # dt:  dict shared across threads that collects each page's text
        threading.Thread.__init__(self)
        self.url = url
        self.num = num
        self.txt_dict = dt

    def run(self):
        # Log message typo corrected ('downling' -> 'downloading').
        print('downloading from %s' % self.url)
        self.down_text()

    def down_text(self):
        """Fetch self.url and store its extracted text under self.num."""
        html_content = urllib.urlopen(self.url).read()
        # Anchor on each reply container, then capture (time, body) for
        # posts marked as the thread starter (樓主).
        # NOTE(review): markup-dependent — verify against the live site.
        # Raw string avoids the invalid '\s' escape warning on py3.
        text_pattern = re.compile(r'<div class="atl-item".*?<span>時(shí)間:(.*?)</span>.*?<!-- <div class="host-ico">樓主</div> -->.*?<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)
        text = text_pattern.findall(html_content)
        # Blank lines between the timestamp and the body of each entry.
        text_join = ['\r\n\r\n\r\n\r\n'.join(item) for item in text]
        self.txt_dict[self.num] = text_join
def page(url):
    """Parse the pager on the post's first page; return the page count.

    The last page number sits immediately before the 下頁 (next page)
    link. Falls through (returning None) when no pager is present.
    """
    source = urllib.urlopen(url).read()
    finder = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" class="\S*?">下頁</a>')
    hit = finder.search(source)
    if hit is not None:
        return int(hit.group(1))
def write_text(dict, fn):
    """Dump each page's text list to file *fn*, iterating pages 1..N.

    Values of *dict* are lists of text entries per page; HTML line
    breaks are normalised to CRLF before each entry is written.
    """
    out_file = open(fn, 'w+')
    page_count = len(dict)
    for page_no in range(1, page_count + 1):
        for entry in dict[page_no]:
            entry = entry.replace('<br>', '\r\n').replace('<br />', '\r\n').replace(' ', '')
            out_file.write(entry.strip() + '\r\n' * 4)
    out_file.close()
def main():
    """Entry point: fetch all pages of the target post and save abc.txt."""
    url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    file_name = 'abc.txt'
    total_pages = page(url)
    results = {}
    print('page num is : %s' % total_pages)
    workers = []
    # Build each page's URL by swapping the trailing '<n>.shtml' part.
    for page_no in range(1, total_pages + 1):
        page_url = '%s%s.shtml' % (url[:-7], page_no)
        worker = Down_Tianya(page_url, page_no, results)
        worker.start()
        workers.append(worker)
    # Block until every download completes, then write in page order.
    for worker in workers:
        worker.join()
    write_text(results, file_name)
    print('All download finished. Save file at directory: %s' % os.getcwd())


if __name__ == '__main__':
    main()
- Python使用代理抓取網(wǎng)站圖片(多線程)
- Python代理抓取并驗(yàn)證使用多線程實(shí)現(xiàn)
- Python實(shí)現(xiàn)多線程抓取妹子圖
- python實(shí)現(xiàn)多線程抓取知乎用戶
- Python實(shí)現(xiàn)多線程抓取網(wǎng)頁功能實(shí)例詳解
- Python之多線程爬蟲抓取網(wǎng)頁圖片的示例代碼
- python Selenium爬取內(nèi)容并存儲(chǔ)至MySQL數(shù)據(jù)庫的實(shí)現(xiàn)代碼
- Python實(shí)現(xiàn)批量讀取圖片并存入mongodb數(shù)據(jù)庫的方法示例
- Python3實(shí)現(xiàn)的爬蟲爬取數(shù)據(jù)并存入mysql數(shù)據(jù)庫操作示例
- Python基于多線程實(shí)現(xiàn)抓取數(shù)據(jù)存入數(shù)據(jù)庫的方法
相關(guān)文章
使用Python第三方庫pygame寫個(gè)貪吃蛇小游戲
這篇文章主要介紹了使用Python第三方庫pygame寫個(gè)貪吃蛇小游戲,本文通過實(shí)例代碼給大家介紹的非常詳細(xì),對(duì)大家的學(xué)習(xí)或工作具有一定的參考借鑒價(jià)值,需要的朋友可以參考下2020-03-03Python實(shí)現(xiàn)自定義異常堆棧信息的示例代碼
當(dāng)我們的程序報(bào)錯(cuò)時(shí),解釋器會(huì)將整個(gè)異常的堆棧信息全部輸出出來。解釋器會(huì)將異常產(chǎn)生的整個(gè)調(diào)用鏈都給打印出來,那么問題來了,我們能不能自定義這些報(bào)錯(cuò)信息呢?本文就來為大家詳細(xì)講講2022-07-07keras-siamese用自己的數(shù)據(jù)集實(shí)現(xiàn)詳解
這篇文章主要介紹了keras-siamese用自己的數(shù)據(jù)集實(shí)現(xiàn)詳解,具有很好的參考價(jià)值,希望對(duì)大家有所幫助。一起跟隨小編過來看看吧2020-06-06Python爬蟲運(yùn)用正則表達(dá)式的方法和優(yōu)缺點(diǎn)
這篇文章主要給大家介紹了關(guān)于Python爬蟲運(yùn)用正則表達(dá)式的相關(guān)資料,文中通過示例代碼介紹的非常詳細(xì),對(duì)大家學(xué)習(xí)或者使用Python具有一定的參考學(xué)習(xí)價(jià)值,需要的朋友們下面來一起學(xué)習(xí)學(xué)習(xí)吧2019-08-08