BeautifulSoup Find within an Instagram HTML page
I'm having trouble finding things with bs4.
I'm trying to automatically find some URLs in an Instagram HTML page, and (Python noob that I am) I can't find a way to automatically search the HTML source code for the URLs that follow "display_url": "http..." in the sample below.
I want my script to search for the multiple URLs that appear after "display_url" and download them. They have to be extracted as many times as they appear in the source code.
I tried bs4:
import urllib.request
from bs4 import BeautifulSoup as bs

f = urllib.request.urlopen(fileURL)
htmlSource = f.read()
soup = bs(htmlSource, 'html.parser')
metaTag = soup.find_all('meta', {'property': 'og:image'})
imgURL = metaTag[0]['content']
urllib.request.urlretrieve(imgURL, 'fileName.jpg')
But I couldn't get soup.find_all(... to work/search.
Is there a way for me to find this part of the page with bs4?
Thanks a lot for your help.
Here's my little (Python) code sample as it stands: https://repl.it/@ClementJpn287/bs
<!--cropped...............-->
<body class="">
<span id="react-root"><svg width="50" height="50" viewBox="0 0 50 50" style="position:absolute;top:50%;left:50%;margin:-25px 0 0 -25px;fill:#c7c7c7">
<path
d="
<!--deleted part for privacy -->
" />
</svg></span>
<script type="text/javascript">
window._sharedData = {
"config": {
"csrf_token": "",
"viewer": {
<!--deleted part for privacy -->
"viewerId": ""
},
"supports_es6": true,
"country_code": "FR",
"language_code": "fr",
"locale": "fr_FR",
"entry_data": {
"PostPage": [{
"graphql": {
"shortcode_media": {
"__typename": "GraphSidecar",
<!--deleted part for privacy -->
"dimensions": {
"height": 1080,
"width": 1080
},
"gating_info": null,
"media_preview": null,
<!-- Here's the important part, which has to be extracted as many times as it appears in the source code -->
"display_url": "https://scontent-cdt1-1.cdninstagram.com/vp/",
"display_resources": [{
"src": "https://scontent-cdt1-1.cdninstagram.com/vp/",
"config_width": 640,
"config_height": 640
}, {
"src": "https://scontent-cdt1-1.cdninstagram.com/vp/",
"config_width": 750,
"config_height": 750
}, {
"src": "https://scontent-cdt1-1.cdninstagram.com/vp/",
"config_width": 1080,
"config_height": 1080
}],
"is_video": false,
<!--cropped...............-->
(My newest code is shown further down.)
You can find the appropriate script tag and regex out the info. I'm assuming the first script tag containing window._sharedData = is the appropriate one. You can fiddle with that as required.
from bs4 import BeautifulSoup as bs
import re
html = '''
<html>
<head></head>
<body class="">
<span id="react-root">
<svg width="50" height="50" viewbox="0 0 50 50" style="position:absolute;top:50%;left:50%;margin:-25px 0 0 -25px;fill:#c7c7c7">
<path d="
<!--deleted part for privacy -->
" />
</svg></span>
<script type="text/javascript">
window._sharedData = {
"config": {
"csrf_token": "",
"viewer": {
<!--deleted part for privacy -->
"viewerId": ""
},
"supports_es6": true,
"country_code": "FR",
"language_code": "fr",
"locale": "fr_FR",
"entry_data": {
"PostPage": [{
"graphql": {
"shortcode_media": {
"__typename": "GraphSidecar",
<!--deleted part for privacy -->
"dimensions": {
"height": 1080,
"width": 1080
},
"gating_info": null,
"media_preview": null,
<!-- Here's the important part, which has to be extracted as many times as it appears in the source code -->
"display_url": "https://scontent-cdt1-1.cdninstagram.com/vp/",
"display_resources": [{
"src": "https://scontent-cdt1-1.cdninstagram.com/vp/",
"config_width": 640,
"config_height": 640
}, {
"src": "https://scontent-cdt1-1.cdninstagram.com/vp/",
"config_width": 750,
"config_height": 750
}, {
"src": "https://scontent-cdt1-1.cdninstagram.com/vp/",
"config_width": 1080,
"config_height": 1080
}],
"is_video": false,</script>
</body>
</html>
'''
soup = bs(html, 'lxml')
scripts = soup.select('script[type="text/javascript"]')
for script in scripts:
    if ' window._sharedData =' in script.text:
        data = script.text
        break
r = re.compile(r'"display_url":(.*)",')
print(r.findall(data))
Thanks to @t.h.adam, the above can be shortened to:
soup = bs(html, 'lxml')
r = re.compile(r'"display_url":(.*)",')
data = soup.find('script', text=r).text
print(r.findall(data))
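As an aside: since window._sharedData is assigned a JSON object literal, another option is to slice that literal out and parse it with the json module instead of a regex. A minimal sketch, assuming the page really contains valid JSON once uncropped and the key layout shown above (entry_data → PostPage → graphql → shortcode_media):

import json
import re
from bs4 import BeautifulSoup

def extract_display_urls(html):
    # Locate the script tag whose text mentions window._sharedData.
    soup = BeautifulSoup(html, 'html.parser')
    script = soup.find('script', text=re.compile(r'window\._sharedData'))
    # Slice out the object literal: from the first '{' to the last '}'.
    raw = script.text
    shared = json.loads(raw[raw.find('{'):raw.rfind('}') + 1])
    # Keys taken from the cropped sample above; a multi-image sidecar
    # post may nest further display_url entries under its children.
    return [post['graphql']['shortcode_media']['display_url']
            for post in shared['entry_data']['PostPage']]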
The program has moved on and now looks like this:
import urllib.request
from bs4 import BeautifulSoup

theurl = "your url"  # the Instagram post URL goes here
thepage = urllib.request.urlopen(theurl)
soup = BeautifulSoup(thepage, "html.parser")
print(soup.title.text)
txt = soup.select('script[type="text/javascript"]')[3]
texte = txt.get_text()
f1 = open("tet.txt", 'w')
f1.write(texte)
f1.close()
with open('tet.txt', 'r') as f:
    data = ''.join(f.readlines())
print(data[data.index('"display_url":"'):data.index('","display_resources":') + 1])
But now something new has come up:
- How can I make the URL-finding part of the program (the final data.index/print lines) repeat for as long as '"display_url":"' … '","display_resources":' keeps appearing in the tet.txt file?
- A while loop could be used, but how do I make it repeat the process? (See the sketch below.)
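For the record, one way to repeat that lookup is to feed the position of the previous match back into str.find until it returns -1. A minimal sketch, assuming data holds the contents of tet.txt as in the snippet above:

urls = []
start = 0
while True:
    # Find the next '"display_url":"' marker after the previous match.
    i = data.find('"display_url":"', start)
    if i == -1:
        break  # no more occurrences
    i += len('"display_url":"')
    j = data.find('","display_resources":', i)
    if j == -1:
        break  # unmatched marker; stop rather than slice garbage
    urls.append(data[i:j])
    start = j + 1
print(urls)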
Problem solved.
Here's the code for downloading multiple pictures from an Instagram URL with Pythonista 3 on iOS:
from sys import argv
import urllib
import urllib.request
from bs4 import BeautifulSoup
import re
import photos
import clipboard

theurl = "your url"

#p.1 - fetch the page and dump the 4th script tag (window._sharedData) to a file
thepage = urllib.request.urlopen(theurl)
soup = BeautifulSoup(thepage, "html.parser")
print(soup.title.text)
txt = soup.select('script[type="text/javascript"]')[3]
texte = txt.get_text()
fille = open("tet.txt", 'w')
fille.write(texte)
fille.close()

#p.2 - copy each 'play_url' .. 'play_resources' slice into url.html
g = open('tet.txt', 'r')
data = ''.join(g.readlines())
g.close()
le1 = 0
le2 = 0
hturl = open('url.html', 'w')
still_looking = True
while still_looking:
    still_looking = False
    dat = data.find('play_url', le1)
    det = data.find('play_resources', le2)
    if dat >= le1:
        le1 = dat + 1
        still_looking = True
    if det >= le2:
        hturl.write(data[dat:det])
        le2 = det + 1
        still_looking = True
hturl.close()

#p.3 - pull the bare https://scontent-... URLs out of url.html
hturl2 = open('url.html', 'r')
dete = ''.join(hturl2.readlines())
le11 = 0
le22 = 0
urls = []
still_looking2 = True
while still_looking2:
    still_looking2 = False
    dat2 = dete.find('https://scontent-', le11)
    det2 = dete.find('","dis', le22)
    if dat2 >= le11:
        urls.append(dat2)
        le11 = dat2 + 1
        still_looking2 = True
    if det2 >= le22:
        urls.append(dete[dat2:det2])
        le22 = det2 + 1
        still_looking2 = True
hturl2.close()

#p.4 - every other entry of urls is a URL string; download each to the photo library
imgs = len(urls)
nbindr = 3
images = 1
while nbindr < imgs:
    urllib.request.urlretrieve(urls[nbindr], 'photo.jpg')
    photos.create_image_asset('photo.jpg')
    print('Image ' + str(images) + ' downloaded')
    nbindr = nbindr + 2
    images += 1
print("OK")
It's a bit fiddly, but it works fast, too.
Thanks for your help.
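For what it's worth, parts p.2 and p.3 above could probably be collapsed into a single regex pass over the saved script text. A sketch, assuming the same '"display_url": "…"' layout as in the cropped source:

import re

# One pass instead of the two find() loops in p.2/p.3; \s* tolerates
# an optional space after the colon, as in the cropped sample.
r = re.compile(r'"display_url":\s*"(https://[^"]+)"')
with open('tet.txt', 'r') as f:
    urls = r.findall(f.read())
print(urls)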