Scraping movie-site download links with Python
項(xiàng)目地址:
https://github.com/GriffinLewis2001/Python_movie_links_scraper
運(yùn)行效果


導(dǎo)入模塊
import requests, re
from requests.cookies import RequestsCookieJar
from fake_useragent import UserAgent   # imported but never used below
import os, pickle, threading, time     # imported but never used below
import concurrent.futures              # imported but never used below
from goto import with_goto
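Beyond the standard library, three third-party packages are needed. The PyPI names below are my assumption based on the import statements (fake-useragent provides UserAgent, goto-statement provides with_goto):

# pip install requests fake-useragent goto-statement

Note that the goto-statement package only supports older CPython releases, so the script may not run unmodified on a current interpreter.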
爬蟲(chóng)主代碼
def get_content_url_name(url):
    # Fetch a page and pull (link, title) pairs out of the thumbnail anchors.
    # (Defined in the project but not called by main() below.)
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8"
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    # headers must be passed by keyword: the second positional argument
    # of requests.get is params, not headers.
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    reg = re.compile(r'<a href="(.*?)" rel="external nofollow" class="thumbnail-img" title="(.*?)"')
    url_name_list = reg.findall(content)
    return url_name_list
def get_content(url):
    # Fetch a page and return its HTML as UTF-8 text.
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8"
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    return response.text
def search_durl(url):
    # Extract the obfuscated request token from the detail page, then
    # scrape the actual download links from the downloadList endpoint.
    content = get_content(url)
    # The page embeds the token under a hex-escaped JS key that spells
    # 'decriptParam' (see the decoding snippet below).
    reg = re.compile(r"{'\\x64\\x65\\x63\\x72\\x69\\x70\\x74\\x50\\x61\\x72\\x61\\x6d':'(.*?)'}")
    index = reg.findall(content)[0]
    # Drop the trailing '.html' (5 characters) before appending the path.
    download_url = url[:-5] + r'/downloadList?decriptParam=' + index
    content = get_content(download_url)
    reg1 = re.compile(r'title=".*?" href="(.*?)" rel="external nofollow" ')
    download_list = reg1.findall(content)
    return download_list
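The regex above looks opaque because the site hides the parameter name behind JavaScript hex escapes. A quick standalone check shows what the escaped key actually spells:

# Decode the hex-escaped JS key found in the page source; it is plain ASCII.
escaped = r'\x64\x65\x63\x72\x69\x70\x74\x50\x61\x72\x61\x6d'
print(escaped.encode().decode('unicode_escape'))  # -> decriptParam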
def get_page(url):
    # Fetch a search-results page and extract (link, title, text) tuples.
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8"
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    reg = re.compile(r'<a target="_blank" class="title" href="(.*?)" rel="external nofollow" title="(.*?)">(.*?)</a>')
    url_name_list = reg.findall(content)
    return url_name_list
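get_content_url_name, get_content, and get_page repeat the same header and cookie setup verbatim. A hedged refactor sketch, not part of the original project, that collapses the duplication into a single helper the three functions could call:

def fetch(url):
    # Shared fetcher: same UA, cookie, and encoding handling as above.
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8"
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    return response.text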
@with_goto
def main():
    print("=========================================================")
    name = input("Enter a show title (type quit to exit): ")
    if name == "quit":
        exit()
    url = "http://www.yikedy.co/search?query=" + name
    dlist = get_page(url)
    print("\n")
    if dlist:
        num = 0
        count = 0
        # Print every search result whose title contains the query string.
        for i in dlist:
            if name in i[1]:
                print(f"{num} {i[1]}")
                num += 1
            elif num == 0 and count == len(dlist) - 1:
                # Reached the last result without a single match.
                goto .end
            count += 1
        dest = int(input("\n\nEnter the number of the show (enter 100 to skip this search): "))
        if dest == 100:
            goto .end
        x = 0
        print("\nDownload links:\n")
        # Walk the matches again and print the links for the chosen one.
        for i in dlist:
            if name in i[1]:
                if x == dest:
                    for durl in search_durl(i[0]):
                        print(f"{durl}\n")
                    print("\n")
                    break
                x += 1
    else:
        label .end
        print("Nothing found (or skipped)\n")
Full code
import requests, re
from requests.cookies import RequestsCookieJar
from fake_useragent import UserAgent   # imported but never used below
import os, pickle, threading, time     # imported but never used below
import concurrent.futures              # imported but never used below
from goto import with_goto
def get_content_url_name(url):
    # Fetch a page and pull (link, title) pairs out of the thumbnail anchors.
    # (Defined in the project but not called by main() below.)
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8"
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    reg = re.compile(r'<a href="(.*?)" rel="external nofollow" class="thumbnail-img" title="(.*?)"')
    url_name_list = reg.findall(content)
    return url_name_list
def get_content(url):
    # Fetch a page and return its HTML as UTF-8 text.
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8"
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    return response.text
def search_durl(url):
    # Extract the obfuscated request token from the detail page, then
    # scrape the actual download links from the downloadList endpoint.
    content = get_content(url)
    # The hex-escaped JS key spells 'decriptParam' once decoded.
    reg = re.compile(r"{'\\x64\\x65\\x63\\x72\\x69\\x70\\x74\\x50\\x61\\x72\\x61\\x6d':'(.*?)'}")
    index = reg.findall(content)[0]
    # Drop the trailing '.html' (5 characters) before appending the path.
    download_url = url[:-5] + r'/downloadList?decriptParam=' + index
    content = get_content(download_url)
    reg1 = re.compile(r'title=".*?" href="(.*?)" rel="external nofollow" ')
    download_list = reg1.findall(content)
    return download_list
def get_page(url):
    # Fetch a search-results page and extract (link, title, text) tuples.
    send_headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
        "Connection": "keep-alive",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8"
    }
    cookie_jar = RequestsCookieJar()
    cookie_jar.set("mttp", "9740fe449238", domain="www.yikedy.co")
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    reg = re.compile(r'<a target="_blank" class="title" href="(.*?)" rel="external nofollow" title="(.*?)">(.*?)</a>')
    url_name_list = reg.findall(content)
    return url_name_list
@with_goto
def main():
    print("=========================================================")
    name = input("Enter a show title (type quit to exit): ")
    if name == "quit":
        exit()
    url = "http://www.yikedy.co/search?query=" + name
    dlist = get_page(url)
    print("\n")
    if dlist:
        num = 0
        count = 0
        # Print every search result whose title contains the query string.
        for i in dlist:
            if name in i[1]:
                print(f"{num} {i[1]}")
                num += 1
            elif num == 0 and count == len(dlist) - 1:
                # Reached the last result without a single match.
                goto .end
            count += 1
        dest = int(input("\n\nEnter the number of the show (enter 100 to skip this search): "))
        if dest == 100:
            goto .end
        x = 0
        print("\nDownload links:\n")
        # Walk the matches again and print the links for the chosen one.
        for i in dlist:
            if name in i[1]:
                if x == dest:
                    for durl in search_durl(i[0]):
                        print(f"{durl}\n")
                    print("\n")
                    break
                x += 1
    else:
        label .end
        print("Nothing found (or skipped)\n")
print("本軟件由CLY.所有\(zhòng)n\n")
while(True):
main()
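As written, the prompt loop starts as soon as the file is executed. A common tweak, not in the original repository, is to guard the loop so the module can also be imported without side effects:

if __name__ == "__main__":
    print("This software is owned by CLY.\n\n")
    while True:
        main()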
That concludes this walkthrough of scraping movie-site download links with Python. For more on scraping download links with Python, see the other related articles on 脚本之家 (Script Home)!
相關(guān)文章
Python實(shí)現(xiàn)PS圖像抽象畫(huà)風(fēng)效果的方法
這篇文章主要介紹了Python實(shí)現(xiàn)PS圖像抽象畫(huà)風(fēng)效果的方法,涉及Python基于skimage模塊進(jìn)行圖像處理的相關(guān)操作技巧,需要的朋友可以參考下2018-01-01
用python實(shí)現(xiàn)超強(qiáng)的加密軟件
大家好,本篇文章主要講的是用python實(shí)現(xiàn)超強(qiáng)的加密軟件,感興趣的同學(xué)趕快來(lái)看一看吧,對(duì)你有幫助的話記得收藏一下,方便下次瀏覽2022-01-01
python3的一個(gè)天坑問(wèn)題及解決方法:報(bào)錯(cuò)UnicodeDecodeError: ‘utf-8‘
在調(diào)試程序發(fā)現(xiàn)python3的一個(gè)天坑問(wèn)題:報(bào)錯(cuò)UnicodeDecodeError: ‘utf-8‘ codec can‘t decode byte 0xa3 in position 59: invalid,特此曝光,為眾位開(kāi)發(fā)朋友提個(gè)醒2023-09-09
使用Python的Tornado框架實(shí)現(xiàn)一個(gè)Web端圖書(shū)展示頁(yè)面
Tornado是Python的一款高人氣Web開(kāi)發(fā)框架,這里我們來(lái)展示使用Python的Tornado框架實(shí)現(xiàn)一個(gè)Web端圖書(shū)展示頁(yè)面的實(shí)例,通過(guò)該實(shí)例可以清楚地學(xué)習(xí)到Tornado的模板使用及整個(gè)Web程序的執(zhí)行流程.2016-07-07
基于Pytorch實(shí)現(xiàn)分類器的示例詳解
這篇文章主要為大家詳細(xì)介紹了如何基于Pytorch實(shí)現(xiàn)兩個(gè)分類器:?softmax分類器和感知機(jī)分類器,文中的示例代碼講解詳細(xì),需要的可以參考一下2023-04-04

