python多線程爬取西刺代理的示例代碼
西刺代理是一个国内IP代理网站,由于该网站已经倒闭了,所以我就把原来的代码放出来供大家学习吧。
鏡像地址:https://www.blib.cn/url/xcdl.html
首先找到所有的tr標簽,與class="odd"的標簽,然后提取出來。
然后再依次找到tr標簽里面的所有td標簽,然后只提取出里面的[1,2,5,9]這四個標簽的位置,其他的不提取。
最后可以寫出提取單一頁面的代碼,提取后將其保存到文件中。
import sys
import re
import threading
import argparse
from queue import Queue

import requests
import lxml
from bs4 import BeautifulSoup

# Browser-like User-Agent so the site serves the normal page instead of
# rejecting the request as a bot.
head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36"}

if __name__ == "__main__":
    ip_list = []
    # `with` guarantees the file is closed even if a request/parse step raises
    # (the original opened it and never closed it).
    with open("SpiderAddr.json", "a+", encoding="utf-8") as fp:
        url = "https://www.blib.cn/url/xcdl.html"
        request = requests.get(url=url, headers=head)
        soup = BeautifulSoup(request.content, "lxml")
        # NOTE(review): the empty alternative in "|[^odd]" makes this regex
        # match ANY class value, so every <tr> carrying a class attribute is
        # selected, not only class="odd" rows.
        data = soup.find_all(name="tr", attrs={"class": re.compile("|[^odd]")})
        for item in data:
            soup_proxy = BeautifulSoup(str(item), "lxml")
            proxy_list = soup_proxy.find_all(name="td")
            # Header/short rows have fewer than 10 cells and would raise
            # IndexError below; skip them.
            if len(proxy_list) < 10:
                continue
            # Keep only columns 1=IP, 2=port, 5=protocol, 9=verify time.
            for i in [1, 2, 5, 9]:
                ip_list.append(proxy_list[i].string)
            print("[+] 爬行列表: {} 已转存".format(ip_list))
            fp.write(str(ip_list) + '\n')
            ip_list.clear()
爬取后會將文件保存為 SpiderAddr.json 格式。
最后再使用另一段代碼,將其轉(zhuǎn)換為一個SSR代理工具直接能識別的格式,{'http': 'http://119.101.112.31:9999'}
import sys
import re
import threading
import argparse
import ast
from queue import Queue

import requests
import lxml
from bs4 import BeautifulSoup

if __name__ == "__main__":
    result = []
    # `with` closes the file even on a parse error (original leaked the handle).
    with open("SpiderAddr.json", "r", encoding="utf-8") as fp:
        data = fp.readlines()
    for item in data:
        dic = {}
        # Each line is the repr of a list like ['ip', 'port', 'PROTOCOL', 'time'].
        # ast.literal_eval parses that safely; the original used eval(), which
        # would execute arbitrary code if the file were ever tampered with.
        read_line = ast.literal_eval(item.replace("\n", ""))
        Protocol = read_line[2].lower()
        if Protocol == "http":
            dic[Protocol] = "http://" + read_line[0] + ":" + read_line[1]
        else:
            dic[Protocol] = "https://" + read_line[0] + ":" + read_line[1]
        result.append(dic)
    print(result)
完整多線程版代碼如下所示。
import sys
import re
import threading
import argparse
import ast
from queue import Queue

import requests
import lxml
from bs4 import BeautifulSoup

# Browser-like User-Agent so the site serves the normal page instead of
# rejecting the request as a bot.
head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36"}

# Serializes writes from the spider threads so lines in SpiderAddr.json
# cannot interleave (each thread holds its own append handle).
_write_lock = threading.Lock()


class AgentSpider(threading.Thread):
    """Worker thread: pops page URLs off the shared queue, scrapes the proxy
    rows from each page, and appends them to SpiderAddr.json."""

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self._queue = queue

    def run(self):
        ip_list = []
        # `with` guarantees the file is closed when the thread finishes
        # (the original never closed it).
        with open("SpiderAddr.json", "a+", encoding="utf-8") as fp:
            while not self._queue.empty():
                url = self._queue.get()
                try:
                    request = requests.get(url=url, headers=head)
                    soup = BeautifulSoup(request.content, "lxml")
                    # NOTE(review): the empty alternative in "|[^odd]" makes
                    # this regex match ANY class value, so every classed <tr>
                    # row is selected, not only class="odd".
                    data = soup.find_all(name="tr", attrs={"class": re.compile("|[^odd]")})
                    for item in data:
                        soup_proxy = BeautifulSoup(str(item), "lxml")
                        proxy_list = soup_proxy.find_all(name="td")
                        # Keep only columns 1=IP, 2=port, 5=protocol, 9=verify time.
                        for i in [1, 2, 5, 9]:
                            ip_list.append(proxy_list[i].string)
                        print("[+] 爬行列表: {} 已转存".format(ip_list))
                        with _write_lock:
                            fp.write(str(ip_list) + '\n')
                        ip_list.clear()
                except Exception as e:
                    # Best-effort per page: report the failure and keep crawling
                    # instead of silently swallowing every error.
                    print("[-] 爬行失败 {}: {}".format(url, e))


def StartThread(count):
    """Fill the work queue with `count` page URLs and run `count` spider
    threads to completion."""
    queue = Queue()
    threads = []
    for item in range(1, int(count) + 1):
        url = "https://www.xicidaili.com/nn/{}".format(item)
        queue.put(url)
        print("[+] 生成爬行鏈接 {}".format(url))
    for item in range(count):
        threads.append(AgentSpider(queue))
    for t in threads:
        t.start()
    for t in threads:
        t.join()


# Conversion helper
def ConversionAgentIP(FileName):
    """Parse the crawler's output file into requests-style proxy dicts,
    e.g. {'http': 'http://119.101.112.31:9999'}.

    Each line of FileName is the repr of ['ip', 'port', 'PROTOCOL', 'time'].
    Returns the list of one-entry dicts keyed by lowercase protocol.
    """
    result = []
    # `with` closes the file even on a parse error (original leaked the handle).
    with open(FileName, "r", encoding="utf-8") as fp:
        data = fp.readlines()
    for item in data:
        dic = {}
        # ast.literal_eval parses the list literal safely; eval() would execute
        # arbitrary code if the file were ever tampered with.
        read_line = ast.literal_eval(item.replace("\n", ""))
        Protocol = read_line[2].lower()
        if Protocol == "http":
            dic[Protocol] = "http://" + read_line[0] + ":" + read_line[1]
        else:
            dic[Protocol] = "https://" + read_line[0] + ":" + read_line[1]
        result.append(dic)
    return result


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--page", dest="page", help="指定爬行多少頁")
    parser.add_argument("-f", "--file", dest="file", help="将爬取到的结果转化为代理格式 SpiderAddr.json")
    args = parser.parse_args()
    if args.page:
        StartThread(int(args.page))
    elif args.file:
        dic = ConversionAgentIP(args.file)
        for item in dic:
            print(item)
    else:
        parser.print_help()
以上就是python多线程爬取西刺代理的示例代码的详细内容,更多关于python多线程爬取代理的资料请关注脚本之家其它相关文章!
相關(guān)文章
解决python打开https出现certificate verify failed的问题
这篇文章主要介绍了解决python打开https出现certificate verify failed的问题,具有很好的参考价值,希望对大家有所帮助。一起跟随小编过来看看吧(2020-09-09)
django实现模型字段动态choice的操作
这篇文章主要介绍了django实现模型字段动态choice的操作,具有很好的参考价值,希望对大家有所帮助。一起跟随小编过来看看吧(2020-04-04)