How to download PDF files with Python from a URL whose links point to sub-URLs


I am trying to download all the PDF files linked from the following URLs:

https://www.adb.org/projects/documents/country/ban/year/2020?terms=education
https://www.adb.org/projects/documents/country/ban/year/2019?terms=education
https://www.adb.org/projects/documents/country/ban/year/2018?terms=education

Each of these URLs contains a list of sub-links pointing to pages that hold the PDF files. The list of links on the main URLs comes from a search by country, year, and term.

I have tried the code below and changed it in various ways, but it does not seem to work. Any help would be appreciated. Thanks.

import os
import time
from glob import glob 
import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup
 
url = ["https://www.adb.org/projects/documents/country/ban/year/2020?terms=education",
      "https://www.adb.org/projects/documents/country/ban/year/2019?terms=education",
      "https://www.adb.org/projects/documents/country/ban/year/2018?terms=education"]

folder = glob("J:/pdfs/*/")

for i, folder_location in zip(url, folder):
    time.sleep(1)
    response = requests.get(i)
    soup = BeautifulSoup(response.text, "lxml")
    for link in soup.select("[href$='.pdf']"):

        filename = os.path.join(folder_location,link['href'].split('/')[-1])
        with open(filename, 'wb') as f:
            f.write(requests.get(urljoin(i,link['href'])).content)
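
My understanding is that the main pages only list links to the individual document pages, and the .pdf links live on those sub-pages, so the script would need a two-level crawl. Below is a rough sketch of what I am aiming for; the selector used to find the sub-page links is only a guess on my part:

import os
import time
import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup

urls = ["https://www.adb.org/projects/documents/country/ban/year/2020?terms=education",
        "https://www.adb.org/projects/documents/country/ban/year/2019?terms=education",
        "https://www.adb.org/projects/documents/country/ban/year/2018?terms=education"]

folder = "J:/pdfs"

for url in urls:
    time.sleep(1)
    soup = BeautifulSoup(requests.get(url).text, "lxml")
    # Step 1: collect the links to the document sub-pages (this selector is an assumption)
    subpages = [urljoin(url, a["href"]) for a in soup.select("a[href*='/projects/documents/']")]
    for subpage in subpages:
        time.sleep(1)
        sub_soup = BeautifulSoup(requests.get(subpage).text, "lxml")
        # Step 2: on each sub-page, look for the actual .pdf links
        for link in sub_soup.select("a[href$='.pdf']"):
            pdf_url = urljoin(subpage, link["href"])
            filename = os.path.join(folder, pdf_url.split("/")[-1])
            with open(filename, "wb") as f:
                f.write(requests.get(pdf_url).content)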

1 Answer

Give this a try; it will place the files in the pdfs folder.
import os
from simplified_scrapy import Spider, SimplifiedDoc, SimplifiedMain, utils

class MySpider(Spider):
    name = 'download_pdf'
    allowed_domains = ["www.adb.org"]
    start_urls = [
        "https://www.adb.org/projects/documents/country/ban/year/2020?terms=education",
        "https://www.adb.org/projects/documents/country/ban/year/2019?terms=education",
        "https://www.adb.org/projects/documents/country/ban/year/2018?terms=education"
    ]  # Entry page

    def __init__(self):
        Spider.__init__(self, self.name)  #necessary
        if (not os.path.exists('./pdfs')):
            os.mkdir('./pdfs')

    def afterResponse(self, response, url, error=None, extra=None):
        try:
            path = './pdfs' + url[url.rindex('/'):]
            index = path.find('?')
            if index > 0: path = path[:index]
            flag = utils.saveResponseAsFile(response, path, fileType="pdf")
            if flag:
                return None
            else:  # If it's not a pdf, leave it to the frame
                return Spider.afterResponse(self, response, url, error)
        except Exception as err:
            print(err)

    def extract(self, url, html, models, modelNames):
        doc = SimplifiedDoc(html)
        lst = doc.selects('div.list >a').contains("documents/", attr="href")
        if not lst:
            lst = doc.selects('div.hidden-md hidden-lg >a')
        urls = []
        for a in lst:
            a["url"] = utils.absoluteUrl(url.url, a["href"])
            urls.append(a)

        return {"Urls": urls}


SimplifiedMain.startThread(MySpider())  # Start download

With the version below, the PDFs from each URL are downloaded into their own folder.
from simplified_scrapy import Spider, SimplifiedDoc, SimplifiedMain, utils

class MySpider(Spider):
    name = 'download_pdf'
    allowed_domains = ["www.adb.org"]
    start_urls = [
        "https://www.adb.org/projects/documents/country/ban/year/2020?terms=education",
        "https://www.adb.org/projects/documents/country/ban/year/2019?terms=education",
        "https://www.adb.org/projects/documents/country/ban/year/2018?terms=education"
    ]  # Entry page

    def afterResponse(self, response, url, error=None, extra=None):
        if not extra:
            print ("The version of library simplified_scrapy is too old, please update.")
            SimplifiedMain.setRunFlag(False)
            return
        try:
            path = './pdfs'
            # create folder start
            srcUrl = extra.get('srcUrl')
            if srcUrl:
                index = srcUrl.find('year/')
                year = ''
                if index > 0:
                    year = srcUrl[index + 5:]
                    index = year.find('?')
                    if index>0:
                        path = path + year[:index]
                        utils.createDir(path)
            # create folder end

            path = path + url[url.rindex('/'):]
            index = path.find('?')
            if index > 0: path = path[:index]
            flag = utils.saveResponseAsFile(response, path, fileType="pdf")
            if flag:
                return None
            else:  # If it's not a pdf, leave it to the frame
                return Spider.afterResponse(self, response, url, error, extra)
        except Exception as err:
            print(err)

    def extract(self, url, html, models, modelNames):
        doc = SimplifiedDoc(html)
        lst = doc.selects('div.list >a').contains("documents/", attr="href")
        if not lst:
            lst = doc.selects('div.hidden-md hidden-lg >a')
        urls = []
        for a in lst:
            a["url"] = utils.absoluteUrl(url.url, a["href"])
            # Set root url start
            a["srcUrl"] = url.get('srcUrl')
            if not a['srcUrl']:
                a["srcUrl"] = url.url
            # Set root url end
            urls.append(a)

        return {"Urls": urls}

    # Download again by resetting the URL. Called when you want to download again.
    def resetUrl(self):
        Spider.clearUrl(self)
        Spider.resetUrlsTest(self)

SimplifiedMain.startThread(MySpider())  # Start download
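
For reference, the per-year folder is derived purely from string operations on the source URL inside afterResponse. A minimal standalone sketch of that path logic in plain Python (the PDF URL below is just a made-up example to illustrate the result):

# Standalone illustration of the path logic in afterResponse above.
src_url = "https://www.adb.org/projects/documents/country/ban/year/2020?terms=education"
pdf_url = "https://www.adb.org/sites/default/files/example-document.pdf"  # hypothetical PDF link

path = './pdfs'
index = src_url.find('year/')
if index > 0:
    year = src_url[index + 5:]        # "2020?terms=education"
    q = year.find('?')
    if q > 0:
        path = path + year[:q]        # "./pdfs2020" (the year is appended to the folder prefix)
        # utils.createDir(path) creates this folder in the spider

path = path + pdf_url[pdf_url.rindex('/'):]   # append "/example-document.pdf"
q = path.find('?')
if q > 0:
    path = path[:q]                   # strip any query string from the file name

print(path)  # ./pdfs2020/example-document.pdf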

@crackers This line sets the file storage path: path = './pdfs' + url[url.rindex('/'):]. The files can be stored in different folders, but that is a bit more complicated. I will try to make the change. - dabingsou
@crackers Yes, it deduplicates downloads. If you want to run it again during testing, you need to delete the files under the /db folder before running. - dabingsou
If you have any questions, just leave me a message. No problem :) @crackers - dabingsou
@crackers No, it is the /db folder under the project root directory. - dabingsou
You're welcome. If you have any questions in the future, just leave me a message. - dabingsou
