彼岸桌面壁纸 附链接自取
import time
import requests
from lxml import etree
import os
# http://www.netbian.com/ 爬虫
# Scraper for http://www.netbian.com/ anime ("dongman") wallpapers.
# Walks the listing pages, follows each thumbnail to its detail page,
# and saves the full-size image under ./保存图片/<title>.jpg.
if __name__ == '__main__':
    filePath = './保存图片'
    if not os.path.exists(filePath):
        os.mkdir(filePath)

    # First listing page; each loop iteration discovers the link to the next one.
    page_next = 'http://www.netbian.com/dongman/index.htm'
    header = {  # Spoof a browser User-Agent so the site serves the real page
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36 Edg/89.0.774.77"
    }

    for _ in range(1, 133):  # the category had ~132 listing pages when written
        page_text = requests.get(url=page_next, headers=header)
        page_text.encoding = 'gbk'  # site serves GBK, not UTF-8
        tree = etree.HTML(page_text.text)
        li_list = tree.xpath('//div[@class="list"]//li')

        # Pagination block: on the first page the only arrow anchor is "next";
        # on later pages index 0 is "prev" and index 1 is "next".
        _next = tree.xpath('//div[@class="page"]/a[@class="prev"]/@href')
        if len(_next) == 1:
            page_next = 'http://www.netbian.com/' + _next[0]
        else:
            page_next = 'http://www.netbian.com/' + _next[1]

        for li in li_list:
            time.sleep(0.3)  # throttle: be polite to the server
            href = li.xpath('./a/@href')
            if not href:
                continue
            href = href[0]
            if href == 'https://pic.netbian.com/':  # advertisement entry, skip
                continue

            page_url = 'http://www.netbian.com/' + href
            alt = li.xpath('./a/img/@alt')[0]
            # Keep only the text before the first space (the rest is size info).
            # BUGFIX: when there is no space, find() returns -1 and the original
            # alt[0:-1] silently dropped the last character; use the whole name.
            space_at = alt.find(' ')
            name = alt if space_at == -1 else alt[:space_at]
            title = filePath + '/' + name + '.jpg'

            img_page = requests.get(url=page_url, headers=header)
            detail_tree = etree.HTML(img_page.text)
            img_url = detail_tree.xpath('//div[@class="pic"]//a/img/@src')[0]
            try:
                img_file = requests.get(img_url, headers=header, stream=True)
                # BUGFIX: requests.get never returns None, so the original
                # `!= None` check was always true and error pages were saved
                # as .jpg files; verify the HTTP status instead.
                img_file.raise_for_status()
                with open(title, 'wb') as f:
                    f.write(img_file.content)
                print(title + '下载成功')
            # BUGFIX: narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit) to network/HTTP errors only.
            except requests.RequestException:
                print('异常咯,不用管')

    print('Over 全部下载完成')
链接:https://pan.baidu.com/s/1ykz-qpThESUDFP_qQpBVzg?pwd=9mvj
提取码:9mvj
--来自百度网盘超级会员V5的分享
-
Python
就是白拿 nice
技术豆
看不懂,告辞
6
这个怎么用?
能血脉偾张吗?
可以看看 彼岸桌面官网 应该很正常
py 爬虫
直接给我壁纸
链接:https://pan.baidu.com/s/1ykz-qpThESUDFP_qQpBVzg?pwd=9mvj
提取码:9mvj
--来自百度网盘超级会员V5的分享
链接打不开
链接打不开
能打开啊
能打开
👍