python爬蟲實(shí)現(xiàn)獲取下一頁代碼
我們首先來看下實(shí)例代碼:
from time import sleep
import faker
import requests
from lxml import etree
fake = faker.Faker()  # used to generate a random User-Agent string per request
base_url = "http://angelimg.spbeen.com"  # site root, prepended to relative "next page" hrefs
def get_next_link(url):
    """Return the absolute URL of the "next page" link found on *url*'s page,
    or False when no such link exists (i.e. the last page was reached).
    """
    content = downloadHtml(url)
    html = etree.HTML(content)
    # NOTE(review): the original XPath read "http://a[...]" — an artifact of the
    # article's auto-linker mangling the leading "//". Restored to a valid
    # XPath; the broken form could never match, so the crawl stopped at page 1.
    next_url = html.xpath("//a[@class='ch next']/@href")
    if next_url:
        # The href is site-relative, so prepend the site root.
        return base_url + next_url[0]
    else:
        return False
def downloadHtml(url):
    """Fetch *url* and return the response body as text.

    Sends a randomized User-Agent and a Referer header — presumably the site
    rejects requests without them (hotlink protection); verify against the site.

    NOTE(review): the parameter was originally named ``ur`` while the body used
    ``url``, which silently resolved to the module-level global and only worked
    by accident inside ``__main__``. Renamed so the function uses its argument.
    """
    headers = {
        'User-Agent': fake.user_agent(),
        "Referer": "http://angelimg.spbeen.com/",
    }
    response = requests.get(url, headers=headers)
    return response.text
def getImgUrl(content):
    """Parse a gallery page's HTML and return ``(image_url, page_title)``.

    Raises IndexError if the page contains neither an image in #content nor
    an article heading — callers treat that as a hard failure.
    """
    html = etree.HTML(content)
    img_url = html.xpath('//*[@id="content"]/a/img/@src')
    # NOTE(review): the original predicate was "['@class=article']" — a literal
    # string predicate, which is always true and therefore matched every <div>.
    # Corrected to a real attribute test on class="article".
    title = html.xpath(".//div[@class='article']/h2/text()")
    return img_url[0], title[0]
def saveImg(title, img_url):
    """Download *img_url* and save it as ``txt/<title>.jpg``.

    Does nothing when either argument is None. Assumes the ``txt/`` directory
    already exists — TODO confirm, otherwise open() raises FileNotFoundError.
    """
    if img_url is None or title is None:
        return
    headers = {
        'User-Agent': fake.user_agent(),
        "Referer": "http://angelimg.spbeen.com/",
    }
    # Fetch BEFORE opening the file, so a failed request cannot leave an
    # empty .jpg behind (the original opened the file first). The redundant
    # f.close() inside the with-block was also dropped.
    response = requests.get(img_url, headers=headers)
    with open("txt/" + str(title) + ".jpg", 'wb') as f:
        f.write(response.content)
def request_view(response):
    """Debug helper: dump *response*'s body to ``tmp.html`` and open it in a browser.

    A ``<base>`` tag is injected right after ``<head>`` so relative links and
    images on the dumped page still resolve against the original URL.
    """
    import webbrowser
    request_url = response.url
    # Renamed from base_url: the original local shadowed the module-level base_url.
    base_tag = '<head><base href="%s" rel="external nofollow" >' % (request_url)
    content = response.content.replace(b"<head>", base_tag.encode())
    # with-block guarantees the file is flushed and closed before the browser
    # opens it (the original used an unmanaged open/close pair).
    with open('tmp.html', 'wb') as tmp_html:
        tmp_html.write(content)
    webbrowser.open_new_tab('tmp.html')
def crawl_img(url):
    """Fetch the gallery page at *url*, locate its image, and save it to disk."""
    page_html = downloadHtml(url)
    img_url, title = getImgUrl(page_html)
    saveImg(title, img_url)
if __name__ == "__main__":
url = "http://angelimg.spbeen.com/ang/4968/1"
while url:
print(url)
crawl_img(url)
url = get_next_link(url)
python 爬蟲如何執(zhí)行自動下一頁循環(huán)加載文字
from bs4 import BeautifulSoup
import requests
import time
from lxml import etree
import os
# 該demo執(zhí)行的為如何利用bs去爬一些文字
def start():
    """Demo: fetch the Baidu homepage with BeautifulSoup, print the parsed
    tree, and save the page <title> text to a local file.
    """
    html = requests.get('http://www.baidu.com')
    # Use the encoding sniffed from the body; the Content-Type header's
    # charset is often missing or wrong for Chinese sites.
    html.encoding = html.apparent_encoding
    soup = BeautifulSoup(html.text, 'html.parser')
    print(type(soup))
    print('打印元素')
    print(soup.prettify())
    title = soup.head.title.string
    print(title)
    # Explicit utf-8: the platform default encoding (e.g. a non-UTF-8 locale)
    # may not be able to encode the Chinese title, raising UnicodeEncodeError.
    # NOTE(review): the hard-coded desktop path is user-specific — parameterize
    # it if this demo is reused.
    with open(r'C:/Users/a/Desktop/a.txt', 'w', encoding='utf-8') as f:
        f.write(title)
    print(time.localtime())
# Sample news-listing page used by the two "find the next page" demos below.
url_2 = 'http://news.gdzjdaily.com.cn/zjxw/politics/sz_4.shtml'
def get_html_from_bs4(url):
    """BeautifulSoup variant: extract the "next page" link from *url*'s
    pagination block and return its absolute URL, or None when the 9th
    pagination anchor is absent (layout change or last page).

    Returning the URL is new — the original computed it into an unused local
    (``next2``) and returned nothing; callers that ignored the return value
    are unaffected.
    """
    # response = requests.get(url,headers=data,proxies=ip).content.decode('utf-8')
    response = requests.get(url).content.decode('utf-8')
    soup = BeautifulSoup(response, 'html.parser')
    anchors = soup.select('#displaypagenum a:nth-of-type(9)')
    if not anchors:
        # Guard the original unconditional [0], which raised IndexError here.
        return None
    next_page = anchors[0].get('href')
    print(next_page)
    # hrefs are relative to the politics section, so prepend it.
    return 'http://news.gdzjdaily.com.cn/zjxw/politics/' + next_page
def get_html_from_etree(url):
    """lxml/XPath variant: return the (relative) href of the 8th
    ``class="PageNum"`` anchor on *url*, or None when no such anchor exists.

    Returning the href is new — the original only printed it; callers that
    ignored the return value are unaffected.
    """
    response = requests.get(url).content.decode('utf-8')
    html = etree.HTML(response)
    hrefs = html.xpath('.//a[@class="PageNum"][8]/@href')
    if not hrefs:
        # Guard the original unconditional [0], which raised IndexError here.
        return None
    next_page = hrefs[0]
    print(next_page)
    # next2='http://news.gdzjdaily.com.cn/zjxw/politics/'+next_page
    return next_page
# NOTE(review): this runs at import time, BEFORE the __main__ guard below —
# any import of this module triggers a live network request. Consider moving
# this call under the guard.
get_html_from_etree(url_2)
# Script entry point: runs only the BeautifulSoup demo; the etree demo above
# already executed at import time.
if __name__ == '__main__':
    start()
到此這篇關(guān)于python爬蟲實(shí)現(xiàn)獲取下一頁代碼的文章就介紹到這了,更多相關(guān)python爬蟲獲取下一頁內(nèi)容請搜索腳本之家以前的文章或繼續(xù)瀏覽下面的相關(guān)文章希望大家以后多多支持腳本之家!
相關(guān)文章
Python動力系統(tǒng)驗(yàn)證三體人是否真的存在
這篇文章主要介紹了Python動力系統(tǒng)驗(yàn)證三體人是否真的存在,文中含有詳細(xì)的圖文示例,有需要的朋友可以借鑒參考下,希望能夠有所幫助2021-10-10
使用Python生成200個激活碼的實(shí)現(xiàn)方法
這篇文章主要介紹了使用Python生成200個激活碼的實(shí)現(xiàn)方法,文中通過示例代碼介紹的非常詳細(xì),對大家的學(xué)習(xí)或者工作具有一定的參考學(xué)習(xí)價值,需要的朋友們下面隨著小編來一起學(xué)習(xí)學(xué)習(xí)吧2019-11-11
python環(huán)境下OPenCV處理視頻流局部區(qū)域像素值
這篇文章主要為大家介紹了python環(huán)境下OPenCV處理視頻流局部區(qū)域像素值的實(shí)現(xiàn)示例,有需要的朋友可以借鑒參考下,希望能夠有所幫助,祝大家多多進(jìn)步2021-11-11
基礎(chǔ)語音識別-食物語音識別baseline(CNN)
這篇文章主要介紹了一個基礎(chǔ)語音識別題目-食物語音識別baseline(CNN),代碼詳細(xì)嗎,對于想要學(xué)習(xí)語音識別的朋友可以參考下2021-04-04
Python學(xué)習(xí)之configparser模塊的使用詳解
ConfigParser是用來讀取配置文件的包。這篇文章主要通過一些簡單的實(shí)例帶大家了解一下ConfigParser模塊的具體使用,感興趣的小伙伴跟隨小編一起了解一下2023-01-01
Python中利用all()來優(yōu)化減少判斷的實(shí)例分析
在本篇文章里小編給大家整理的是一篇關(guān)于Python中利用all()來優(yōu)化減少判斷的實(shí)例分析內(nèi)容,有需要的朋友們可以學(xué)習(xí)下。2021-06-06

