Python Crawler: Keyword Image Downloader

Target site: Baidu Image Search

It's a tiny script, so the full source is below.
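
Note: besides the standard library, the script relies on the third-party requests and fake_useragent packages; they can typically be installed with pip install requests fake-useragent.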

import requests
from fake_useragent import UserAgent
from urllib.parse import urlencode
import json
import time
import os

# Use a random User-Agent for each run so the requests look less like a bot
headers = {
    "User-Agent": UserAgent().random
}


def get_photo(url, page, n):
    # Download a single image and save it as photo/<page>_<n>.jpg
    response = requests.get(url, headers=headers, timeout=5)

    path = os.path.join(os.getcwd(), "photo")
    os.makedirs(path, exist_ok=True)

    with open(os.path.join(path, "{}_{}.jpg".format(page, n)), "wb") as f:
        f.write(response.content)


def spider(base_url, pn, time_stamp, gsm, keyword):
    # Query parameters for Baidu's acjson image-search API.
    # "头像" (avatar) is appended to the keyword, so the search targets avatar-style images.
    params = {
        "tn": "resultjson_com",
        "logid": "7151036769869146123",
        "ipn": "rj",
        "ct": "201326592",
        "is": "",
        "fp": "result",
        "queryWord": keyword + "头像",
        "cl": "2",
        "lm": "-1",
        "ie": "utf-8",
        "oe": "utf-8",
        "adpicid": "",
        "st": "",
        "z": "",
        "ic": "",
        "hd": "",
        "latest": "",
        "copyright": "",
        "word": keyword + "头像",
        "s": "",
        "se": "",
        "tab": "",
        "width": "",
        "height": "",
        "face": "",
        "istype": "",
        "qc": "",
        "nc": "",
        "fr": "",
        "expermode": "",
        "nojc": "",
        "cg": "head",
        "pn": str(pn*30),
        "rn": "30",
        "gsm": gsm,
        str(time_stamp): ""
    }
    url = base_url + urlencode(params)
    response = requests.get(url, headers=headers, timeout=5).text

    # Baidu's JSON contains unescaped backslashes that break json.loads,
    # so double them before parsing.
    second_response = response.replace('\\', '\\\\')
    json_response = json.loads(second_response)

    photo_urls_collections = json_response["data"]

    # The last element of "data" is an empty dict, so stop one short of the end.
    for i in range(len(photo_urls_collections) - 1):
        print(photo_urls_collections[i]["hoverURL"])
        get_photo(photo_urls_collections[i]["hoverURL"], pn, i)


def main():
    base_url = "https://image.baidu.com/search/acjson?"
    keyword = input("Enter the image keyword to crawl: ")
    print("------------------------------------------------------------------------------")
    photo_page = int(input("Enter the number of pages to crawl: "))
    gsm = hex(photo_page * 30)  # hex string passed as the "gsm" parameter
    for i in range(1, photo_page + 1):
        print("------------------------------------------------------------------------------")
        print("--------------------------------- Crawling page {} ---------------------------------".format(i))
        time_stamp = round(time.time() * 1000)  # millisecond timestamp for the request
        spider(base_url, i, time_stamp, gsm, keyword)
        print("--------------------------------- Page {} done ---------------------------------".format(i))
        time.sleep(1)  # brief pause between pages to avoid hammering the server
    print("All pages downloaded!")


if __name__ == '__main__':
    main()
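
The downloaded images end up in a "photo" folder under the current working directory, named <page>_<index>.jpg.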

Demo

[demo screenshots]
