How to download PDF file from web using python requests library
Trying to download some pdf files from a website with the requests module, but I keep getting the error listed below. I have seen a few posts mentioning to use response.content
instead of response.text
for pdf files, but it still produces an error. Not sure how to fix this.
def scrape_website(link):
    try:
        print("getting content")
        cert = requests.certs.where()
        page = requests.get(link, verify=cert, headers={"User-Agent": "Mozilla/5.0 (X11; CrOS x86_64 12871.102.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.141 Safari/537.36"})
        print(page)
        if ".pdf" in link:
            print("the content is a pdf file. downloading..")
            return page.content
        return page.text
    except Exception as x:
        print(x)
        return ''

statement_page = scrape_website(link)

with open(filepath, 'w+', encoding="utf-8") as f:
    print("writing page")
    f.write(statement_page)
    f.close()
<ipython-input-42-1e4771d32073> in save_html_page(page, path, filename)
13 with open(filepath, 'w+', encoding="utf-8") as f:
14 print("writing page")
---> 15 f.write(page)
16 f.close()
17
TypeError: write() argument must be str, not bytes
Sometimes I also need to download things programmatically. I just use this:
import requests
response = requests.get("https://link_to_thing.pdf")
file = open("myfile.pdf", "wb")
file.write(response.content)
file.close()
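For the question's error specifically, the point is that page.content is bytes, so the file must be opened in binary mode ('wb') with no encoding argument; a minimal sketch applying that to the question's own statement_page and filepath:

# scrape_website() returns bytes (page.content) for pdf links, str otherwise
if isinstance(statement_page, bytes):
    # binary mode, no encoding -- fixes "write() argument must be str, not bytes"
    with open(filepath, 'wb') as f:
        f.write(statement_page)
else:
    with open(filepath, 'w', encoding="utf-8") as f:
        f.write(statement_page)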
You can also download it with wget via the os
package:
import os
url = 'https://link_to_pdf.pdf'
name = 'myfile.pdf'
os.system('wget {} -O {}'.format(url,name))
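One caveat with that approach: os.system runs the formatted string through the shell, so a URL containing shell metacharacters can break the command. A small sketch using subprocess.run with an argument list instead (still assuming wget is installed):

import subprocess

url = 'https://link_to_pdf.pdf'
name = 'myfile.pdf'
# passing an argument list avoids shell quoting issues; check=True raises on failure
subprocess.run(['wget', url, '-O', name], check=True)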
Here is an example I used once; it is very handy when you are trying to download a large pdf file:
import requests
import sys

url = 'url'
filename = 'filename'

# creating a connection to the pdf
print("Creating the connection ...")
with requests.get(url, stream=True) as r:
    if r.status_code != 200:
        print("Could not download the file '{}'\nError Code : {}\nReason : {}\n\n".format(
            url, r.status_code, r.reason), file=sys.stderr)
    else:
        # Storing the file as a pdf
        print("Saving the pdf file :\n\"{}\" ...".format(filename))
        with open(filename, 'wb') as f:
            try:
                total_size = int(r.headers['Content-length'])
                saved_size_pers = 0
                moversBy = 8192 * 100 / total_size
                for chunk in r.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
                        saved_size_pers += moversBy
                        print("\r=>> %.2f%%" % (
                            saved_size_pers if saved_size_pers <= 100 else 100.0), end='')
                print(end='\n\n')
            except Exception:
                print("==> Couldn't save : {}".format(filename))
                f.flush()
                r.close()
        r.close()
This uses iter_content()
to download and then save the pdf chunk by chunk.
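As a follow-up, the same chunked pattern can be wrapped in a small reusable helper; a minimal sketch (the download_pdf name, the raise_for_status() call, and the 8192-byte chunk size are illustrative choices, not part of the original answer):

import requests

def download_pdf(url, filename, chunk_size=8192):
    # stream the response so large files are never held fully in memory
    with requests.get(url, stream=True) as r:
        r.raise_for_status()  # raise instead of silently saving an error page
        with open(filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)  # write each binary chunk straight to disk

# example: download_pdf('https://link_to_pdf.pdf', 'myfile.pdf')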