python爬虫结合代理和xpath

发布时间:2024年01月16日
import urllib.parse
import urllib.request
from lxml import etree


# 定制对象
def create_request(page):
    if page == 1:
        url = '你的url'
    else:
        url = '你的url' + str(page) + '.html'
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
    }
    request = urllib.request.Request(url=url, headers=header)
    return request


# 获取内容
# 获取内容
def get_contend(request):
    """Open *request* and return the response body decoded as UTF-8.

    Args:
        request: a prepared ``urllib.request.Request``.

    Returns:
        The response body as a ``str`` (assumes the page is UTF-8;
        a non-UTF-8 page would raise ``UnicodeDecodeError``).
    """
    # Build an opener explicitly (mirrors the original structure; a plain
    # HTTPHandler is the default, so this is where a ProxyHandler would go).
    handler = urllib.request.HTTPHandler()
    opener = urllib.request.build_opener(handler)
    # Close the response deterministically — the original leaked the
    # connection by never closing it.
    with opener.open(request) as response:
        return response.read().decode('utf-8')


# 下载
# 下载
def down_load_file(contend, directory='C:\\Users\\nk\\code\\py\\pythonProject\\pachong\\pachongphoto\\'):
    """Parse one listing page and download every matched image.

    Args:
        contend: the page's HTML text.
        directory: destination directory, trailing separator included.
            Defaults to the original hard-coded path for compatibility.
    """
    tree = etree.HTML(contend)
    # The alt text becomes the file name; data-original holds the
    # lazy-loaded image source.
    names = tree.xpath('//div[@class="item"]//img/@alt')
    srcs = tree.xpath('//div[@class="item"]//img/@data-original')
    # zip pairs the two lists and stops at the shorter one, avoiding the
    # IndexError the original range(len(names)) loop could raise when the
    # lists differ in length.
    for name, src in zip(names, srcs):
        print(name)
        # data-original values are protocol-relative ("//host/..."),
        # so prepend the scheme.
        url = "https:" + src
        urllib.request.urlretrieve(url, filename=directory + name + '.jpg')


if __name__ == '__main__':
    # Read an inclusive page range from the user, then crawl each page:
    # build the request, fetch the HTML, and download its images.
    first_page = int(input("起始页码"))
    last_page = int(input("结束的页码"))
    current = first_page
    while current <= last_page:
        req = create_request(current)
        html = get_contend(req)
        down_load_file(html)
        current += 1

文章来源:https://blog.csdn.net/2401_82591739/article/details/135602034
本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。