Download all the PDF files from a website using Python

13

I've tried to put together a script by following several online guides. The script should identify and download every PDF on a website so I don't have to do it by hand. Here is my code so far:

from urllib import request
from bs4 import BeautifulSoup
import re
import os
import urllib

# connect to website and get list of all pdfs
url="http://www.gatsby.ucl.ac.uk/teaching/courses/ml1-2016.html"
response = request.urlopen(url).read()
soup= BeautifulSoup(response, "html.parser")     
links = soup.find_all('a', href=re.compile(r'(.pdf)'))


# clean the pdf link names
url_list = []
for el in links:
    url_list.append(("http://www.gatsby.ucl.ac.uk/teaching/courses/" + el['href']))
#print(url_list)


# download the pdfs to a specified location
for url in url_list:
    print(url)
    fullfilename = os.path.join('E:\webscraping', url.replace("http://www.gatsby.ucl.ac.uk/teaching/courses/ml1-2016/", "").replace(".pdf",""))
    print(fullfilename)
    request.urlretrieve(url, fullfilename)

The code appears to find all the PDFs (uncomment print(url_list) to check), but it fails at the download stage. Specifically, I get the following error and I can't work out what's going wrong:

E:\webscraping>python get_pdfs.py
http://www.gatsby.ucl.ac.uk/teaching/courses/http://www.gatsby.ucl.ac.uk/teaching/courses/ml1-2016/cribsheet.pdf
E:\webscraping\http://www.gatsby.ucl.ac.uk/teaching/courses/cribsheet
Traceback (most recent call last):
  File "get_pdfs.py", line 26, in <module>
    request.urlretrieve(url, fullfilename)
  File "C:\Users\User\Anaconda3\envs\snake\lib\urllib\request.py", line 248, in urlretrieve
    with contextlib.closing(urlopen(url, data)) as fp:
  File "C:\Users\User\Anaconda3\envs\snake\lib\urllib\request.py", line 223, in urlopen
    return opener.open(url, data, timeout)
  File "C:\Users\User\Anaconda3\envs\snake\lib\urllib\request.py", line 532, in open
    response = meth(req, response)
  File "C:\Users\User\Anaconda3\envs\snake\lib\urllib\request.py", line 642, in http_response
    'http', request, response, code, msg, hdrs)
  File "C:\Users\User\Anaconda3\envs\snake\lib\urllib\request.py", line 570, in error
    return self._call_chain(*args)
  File "C:\Users\User\Anaconda3\envs\snake\lib\urllib\request.py", line 504, in _call_chain
    result = func(*args)
  File "C:\Users\User\Anaconda3\envs\snake\lib\urllib\request.py", line 650, in http_error_default
    raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 404: Not Found

Can anyone help me?

5 Answers

37

Take a look at the following implementation. I've used the requests module instead of urllib to do the downloading. Moreover, I've used the .select() method instead of .find_all() so that I can avoid using re.

import os
import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup

url = "http://www.gatsby.ucl.ac.uk/teaching/courses/ml1-2016.html"

#If there is no such folder, the script will create one automatically
folder_location = r'E:\webscraping'
if not os.path.exists(folder_location):os.mkdir(folder_location)

response = requests.get(url)
soup= BeautifulSoup(response.text, "html.parser")     
for link in soup.select("a[href$='.pdf']"):
    #Name the pdf files using the last portion of each link which are unique in this case
    filename = os.path.join(folder_location,link['href'].split('/')[-1])
    with open(filename, 'wb') as f:
        f.write(requests.get(urljoin(url,link['href'])).content)

1
Thanks, this is nice and concise. This is my first experience with web scraping - is it normally this slow? It takes a few seconds per file? Thanks. - user11128
@SIM, how can I name the downloaded PDF files with Asian characters (part of the URL)? I saw this link but I'm not sure how to work it into the code above: https://qiita.com/mix/items/87d094414e46f857de45 - mLstudent33
@SIM This works for the link provided. I'm trying another page where I know there are several links to PDF documents, but I only get 2. - Jabernet
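
Regarding the two comments above, here is a minimal sketch of the same approach, assuming the non-ASCII file names show up percent-encoded in the href: reusing a single requests.Session keeps the connection alive between downloads (which usually helps a little with speed), and urllib.parse.unquote restores the original characters in the saved filenames.

import os
import requests
from urllib.parse import urljoin, unquote
from bs4 import BeautifulSoup

url = "http://www.gatsby.ucl.ac.uk/teaching/courses/ml1-2016.html"
folder_location = r'E:\webscraping'
os.makedirs(folder_location, exist_ok=True)

# One Session reuses the underlying TCP connection across downloads.
session = requests.Session()

response = session.get(url)
soup = BeautifulSoup(response.text, "html.parser")

for link in soup.select("a[href$='.pdf']"):
    # unquote() turns percent-encoded sequences (e.g. %E8%AA%B2) back into
    # the original, possibly non-ASCII, characters for the saved filename.
    filename = unquote(link['href'].split('/')[-1])
    with open(os.path.join(folder_location, filename), 'wb') as f:
        f.write(session.get(urljoin(url, link['href'])).content)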

4

A couple of the links already contain the server address, which is what causes the 404 error. Also, you should not strip .pdf from the filename, because then the file is saved without an extension.

from urllib import request
from bs4 import BeautifulSoup
import re
import os
import urllib

# connect to website and get list of all pdfs
url="http://www.gatsby.ucl.ac.uk/teaching/courses/ml1-2016.html"
response = request.urlopen(url).read()
soup= BeautifulSoup(response, "html.parser")     
links = soup.find_all('a', href=re.compile(r'(.pdf)'))


# clean the pdf link names
url_list = []
for el in links:
    if(el['href'].startswith('http')):
        url_list.append(el['href'])
    else:
        url_list.append("http://www.gatsby.ucl.ac.uk/teaching/courses/" + el['href'])

print(url_list)


# download the pdfs to a specified location
for url in url_list:
    print(url)
    fullfilename = os.path.join('E:\webscraping', url.replace("http://www.gatsby.ucl.ac.uk/teaching/courses/ml1-2016/", ""))
    print(fullfilename)
    request.urlretrieve(url, fullfilename)

If we provide the base URL, will this download all the PDFs across the whole site domain, or only the ones on a single web page? - x89

0

Made a few changes to @SIM's answer for my own needs:

from urllib import request
from bs4 import BeautifulSoup
import re
import os
import urllib

# connect to website and get list of all pdfs
url="http://openclassroom.stanford.edu/MainFolder/DocumentPage.php?course=Compilers&doc=docs/slides.html"
pdfPath = "http://openclassroom.stanford.edu/MainFolder/courses/Compilers/docs/"
response = request.urlopen(url).read()
soup= BeautifulSoup(response, "html.parser")     
links = soup.find_all('a', href=re.compile(r'(.pdf)'))


# clean the pdf link names
url_list = []
for el in links:
    if(el['href'].startswith('http')):
        url_list.append(el['href'])
    else:
        url_list.append(pdfPath + el['href'])

print(f'url_list: {url_list}\n')


# download the pdfs to a specified location
for url in url_list:
    print(f'urL: {url}\n')
    fullfilename = os.path.join(r'standfordPdfs', url.replace(pdfPath, ""))
    print(f'fullfilename: {fullfilename}')
    request.urlretrieve(url, fullfilename)
    

0

I wrote a new script based on @SIM's answer and added argparse. The full code is below:

import os
import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup
import argparse

#%% Example
# one pdf
#   python all_pdf_dl.py -l https://memento.epfl.ch/academic-calendar/ --save-here
# many pdfs
#   python all_pdf_dl.py -l https://idsc.ethz.ch/education/lectures/recursive-estimation.html

#%% Functions
def all_pdf_download(args):
    base_url = args.link
    if args.save_here:
        folder_path = os.getcwd()
    else:
        folder_path = args.folder_path
        if not os.path.exists(args.folder_path):os.mkdir(args.folder_path)
    print("====== 1. Set savepath: {} ======".format(folder_path))
    print("====== 2. Start searching ======")
    #response = requests.get(base_url)
    response = requests.get(base_url, headers={'User-Agent': 'Custom'})
    soup= BeautifulSoup(response.text, "html.parser")
    search_res = soup.select("a[href$='.pdf']")
    print("{} files found!!!".format(len(search_res)))
    print("====== 3. Start downloading ======")
    for counter, link in enumerate(search_res):
        #Name the pdf files using the last portion of each link which are unique in this case
        filename = link['href'].split('/')[-1]
        file_save_path = os.path.join(folder_path,link['href'].split('/')[-1])
        if args.print_all:
            print("[{}/{}] {}".format(counter+1, len(search_res), filename))
        with open(file_save_path, 'wb') as f:
            f.write(requests.get(urljoin(base_url,link['href'])).content)
    print("====== 4. Finished!!! ======")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Test argparse')
    ####################################
    ############ ALL OPTION ############
    ## Main option
    # -l/--link
    parser.add_argument('-l', '--link', required=True, type=str,
                        help='write down site name')
    # --print-all
    parser.add_argument('--print-all', dest='print_all', action='store_true',
                        help="print all filename")
    parser.set_defaults(print_all=True)
    # --save-here
    parser.add_argument('--save-here', dest='save_here', action='store_true',
                        help="save files here")
    parser.set_defaults(save_here=False)
    # --save--folder
    # default setting -> Downloads/ in user’s home directory obtained by (os.path.expanduser('~'))
    parser.add_argument('-f', '--folder_path', default=r""+os.path.join(os.path.expanduser('~'), "Downloads"), 
                        type=str, help='save files in the given folder')

    ########################################
    ############ PARSING OPTION ############
    args = parser.parse_args()
    all_pdf_download(args)

For more details and updates, please see my Gist - hibetterheyj/all_pdf_dl.py

All the best!


If we provide the base URL, will this download all the PDFs on the page, or every file under the whole site domain? - x89

-1
Generally, the answers above should work. However, you should inspect the HTML source of the page you are trying to scrape. For example, some pages carry an og_url property in their meta tags while others do not. This can be the case if you are working with a site that requires a login (such as your university course page). In that situation you will need to extract the PDF links in a different way.
You can find a good explanation and solution here:

https://medium.com/@dementorwriter/notesdownloader-use-web-scraping-to-download-all-pdfs-with-python-511ea9f55e48
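
As an illustration of that idea, here is a minimal sketch that falls back to the page's og:url meta tag (when present) as the base for resolving relative PDF links; the page address below is hypothetical, and the sketch still assumes the PDFs are reachable as plain <a> links:

import os
import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup

page_url = "https://example.edu/course/materials.html"  # hypothetical course page

response = requests.get(page_url, headers={'User-Agent': 'Mozilla/5.0'})
soup = BeautifulSoup(response.text, "html.parser")

# Prefer the canonical address declared in Open Graph metadata, if any,
# as the base URL for resolving relative links.
og_url = soup.find("meta", property="og:url")
base_url = og_url["content"] if og_url and og_url.get("content") else page_url

for link in soup.select("a[href$='.pdf']"):
    pdf_url = urljoin(base_url, link['href'])
    filename = os.path.basename(pdf_url)
    with open(filename, 'wb') as f:
        f.write(requests.get(pdf_url).content)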

