Given the following code:
# import the modules
import bs4 as bs
import urllib.request
import re

masterURL = 'http://www.metrolyrics.com/top100.html'
sauce = urllib.request.urlopen(masterURL).read()
soup = bs.BeautifulSoup(sauce, 'lxml')
for div in soup.findAll('ul', {'class': 'song-list'}):
    for span in div:
        for link in span:
            for a in link:
                print(a)
I can parse multiple divs, and I get a result as follows:
My question is: instead of getting the full contents of the div, how can I return only the highlighted portion, the URL of the href?
Try this. You need to specify the right class to fetch the URLs connected to it.
from bs4 import BeautifulSoup
import urllib.request
masterURL = 'http://www.metrolyrics.com/top100.html'
sauce = urllib.request.urlopen(masterURL).read()
soup = BeautifulSoup(sauce,'lxml')
for div in soup.find_all(class_='subtitle'):
    print(div.get("href"))
Output:
http://www.metrolyrics.com/charles-goose-lyrics.html
http://www.metrolyrics.com/param-singh-lyrics.html
http://www.metrolyrics.com/westlife-lyrics.html
http://www.metrolyrics.com/luis-fonsi-lyrics.html
http://www.metrolyrics.com/grease-lyrics.html
http://www.metrolyrics.com/shanti-dope-lyrics.html
and so on ---
if 'href' in a.attrs:
    print(a.attrs['href'])
This will give you what you need.
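Put together with the question's loop, a minimal sketch (assuming the page still serves the same song-list markup) could look like this:
# Minimal sketch: walk every anchor inside each song list and print its href.
# Assumes the page still contains <ul class="song-list"> elements.
import bs4 as bs
import urllib.request

masterURL = 'http://www.metrolyrics.com/top100.html'
sauce = urllib.request.urlopen(masterURL).read()
soup = bs.BeautifulSoup(sauce, 'lxml')

for ul in soup.find_all('ul', {'class': 'song-list'}):
    # find_all('a') replaces the nested loops: it yields every anchor in the list
    for a in ul.find_all('a'):
        if 'href' in a.attrs:
            print(a.attrs['href'])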
Related
I want to append the href from this class:
<a class="_2UzuFa" href="/awg-all-weather-gear-solid-men-polo-neck-black-grey-t-shirt/p/itm19ae710c69708?pid=TSHGFKPZNGYMP2FC&lid=LSTTSHGFKPZNGYMP2FCZPKPX3&marketplace=FLIPKART&store=clo%2Fash%2Fank%2Fedy&srno=b_1_38&otracker=browse&fm=organic&iid=en_7%2Fz2ZgorbMeTmb%2F05oING%2BjZoEV8lwngUWQpEDanwo443TzRZ2XfvI9qIOekIcXbWiZZReg3l4w%2Fa03968TVxw%3D%3D&ppt=None&ppn=None&ssid=3o5k6hnkq80000001660826655971"J5 -o7Q4n"></a>
My code:
for item in class:
    containt = soup.find('href')
    print(containt)
It's not working.
Do not use reserved keywords like class as variable names, and to extract the href value from a tag use .get('href').
Example
from bs4 import BeautifulSoup
html='''<a class="_2UzuFa" href="/awg-all-weather-gear-solid-men-polo-neck-black-grey-t-shirt/p/itm19ae710c69708?pid=TSHGFKPZNGYMP2FC&lid=LSTTSHGFKPZNGYMP2FCZPKPX3&marketplace=FLIPKART&store=clo%2Fash%2Fank%2Fedy&srno=b_1_38&otracker=browse&fm=organic&iid=en_7%2Fz2ZgorbMeTmb%2F05oING%2BjZoEV8lwngUWQpEDanwo443TzRZ2XfvI9qIOekIcXbWiZZReg3l4w%2Fa03968TVxw%3D%3D&ppt=None&ppn=None&ssid=3o5k6hnkq80000001660826655971"J5 -o7Q4n"></a>'''
soup = BeautifulSoup(html, 'html.parser')
for a in soup.select('a'):
    print(a.get('href'))
Output
/awg-all-weather-gear-solid-men-polo-neck-black-grey-t-shirt/p/itm19ae710c69708?pid=TSHGFKPZNGYMP2FC&lid=LSTTSHGFKPZNGYMP2FCZPKPX3&marketplace=FLIPKART&store=clo%2Fash%2Fank%2Fedy&srno=b_1_38&otracker=browse&fm=organic&iid=en_7%2Fz2ZgorbMeTmb%2F05oING%2BjZoEV8lwngUWQpEDanwo443TzRZ2XfvI9qIOekIcXbWiZZReg3l4w%2Fa03968TVxw%3D%3D&ppt=None&ppn=None&ssid=3o5k6hnkq80000001660826655971
Example based on Flipkart
from bs4 import BeautifulSoup
import requests
url='https://www.flipkart.com/mens-tshirts/awg-all-weather-gear~brand/pr?sid=clo,ash,ank,edy&marketplace=FLIPKART&otracker=product_breadCrumbs_AWG+All+Weather+Gear+Men%27s+T-shirts'
soup = BeautifulSoup(requests.get(url).text, 'html.parser')
for e in soup.select('a._2UzuFa'):
    print('https://www.flipkart.com' + e.get('href'))
A while ago I used the following code to get window._sharedData; but now the same code no longer works. What should I do?
If I change script to div it works, but I need to use script.
code.py
from bs4 import BeautifulSoup
html1 = '<h1><script>window._sharedData;</script></h1>'
soup = BeautifulSoup(html1)
print(soup.find('script').text)
Add html.parser or lxml and call .string instead of .text:
from bs4 import BeautifulSoup
html = '<h1><script>window._sharedData;</script></h1>'
soup = BeautifulSoup(html, 'html.parser')
print(soup.find('script').string)
You should use BeautifulSoup(html1, 'lxml') instead of BeautifulSoup(html1). If the output is empty, use .string instead of .text. You can try it:
from bs4 import BeautifulSoup
html1 = '<h1><script>window._sharedData;</script></h1>'
soup = BeautifulSoup(html1, 'lxml')
print(soup.find('script').text)
or
print(soup.find('script').string)
Output will be:
window._sharedData;
I'm trying to scrape posts and images from this Facebook profile, https://www.facebook.com/carlostablanteoficial, and I get nothing when trying to reach the actual post text with this code:
from urllib.request import urlopen
import requests
from bs4 import BeautifulSoup
html = urlopen("https://www.facebook.com/carlostablanteoficial")
res = BeautifulSoup(html.read(),"html5lib");
resdiv = res.div
post = resdiv.findAll('div', class_='text_exposed_root')
print(post)
This will return many results:
import requests
from bs4 import BeautifulSoup
data = requests.get("https://www.facebook.com/carlostablanteoficial")
soup = BeautifulSoup(data.text, 'html.parser')
for div in soup.find_all('div'):
    print(div)
To search for a specific class, change the loop to:
for div in soup.find_all('div', {'class': 'text_exposed_root'}):
    print(div)
But when I tried it, it returned nothing, meaning there is no div with that class in the HTML that requests receives.
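A quick way to confirm that is to dump the div classes that are actually present in the fetched HTML; a minimal sketch:
import requests
from bs4 import BeautifulSoup

# Minimal sketch: collect every div class in the HTML that requests receives
# and check whether 'text_exposed_root' appears at all.
data = requests.get("https://www.facebook.com/carlostablanteoficial")
soup = BeautifulSoup(data.text, 'html.parser')

classes = set()
for div in soup.find_all('div'):
    classes.update(div.get('class', []))

print(sorted(classes))
print('text_exposed_root' in classes)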
I want to extract the price from a website, but I'm having trouble locating the right class.
On this website we see that the price for this course is $5141. When I check the source code, the class for the price should be "field-items".
from bs4 import BeautifulSoup
import pandas as pd
import requests
url = "https://www.learningconnection.philips.com/en/course/pinnacle%C2%B3-advanced-planning-education"
html = requests.get(url)
soup = BeautifulSoup(html.text, 'html.parser')
price = soup.find(class_='field-items')
print(price)
However, when I ran the code I got a description of the course instead of the price, and I'm not sure what I did wrong. Any help appreciated, thanks!
There are actually several "field-item even" elements on your webpage, so you have to pick the one inside the right parent class. Here's the code:
from bs4 import BeautifulSoup
import pandas as pd
import requests
url = "https://www.learningconnection.philips.com/en/course/pinnacle%C2%B3-advanced-planning-education"
html = requests.get(url)
soup = BeautifulSoup(html.text, 'html.parser')
section = soup.find(class_='field field-name-field-price field-type-number-decimal field-label-inline clearfix view-mode-full')
price = section.find(class_="field-item even").text
print(price)
And the result:
5141.00
With bs4 4.7.1+ you can use :contains to isolate the appropriate preceding tag, then use adjacent sibling and descendant combinators to get to the target.
import requests
from bs4 import BeautifulSoup as bs
r = requests.get('https://www.learningconnection.philips.com/en/course/pinnacle%C2%B3-advanced-planning-education')
soup = bs(r.content, 'lxml')
print(soup.select_one('.field-label:contains("Price:") + div .field-item').text)
This
.field-label:contains("Price:")
looks for an element with class field-label (the . is a CSS class selector) that contains the text Price:. The + is an adjacent sibling combinator specifying the adjacent div. The .field-item (space, dot, field-item) is a descendant combinator (the space) plus a class selector matching a child of that adjacent div with class field-item. select_one returns the first match in the DOM for the CSS selector combination.
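To illustrate the combinator logic on a self-contained snippet (hypothetical markup that mirrors the label/value structure of the page):
from bs4 import BeautifulSoup as bs

# Hypothetical markup mirroring the page's label/value structure.
html = '''
<div class="field-label">Price:</div>
<div class="field-items"><div class="field-item even">5141.00</div></div>
'''
soup = bs(html, 'html.parser')

# label containing "Price:" -> adjacent sibling div -> descendant with class field-item
print(soup.select_one('.field-label:contains("Price:") + div .field-item').text)
# 5141.00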
Reading:
CSS selectors
To get the price you can try using a CSS selector with .select_one(), which is precise and less error prone.
import requests
from bs4 import BeautifulSoup
url = "https://www.learningconnection.philips.com/en/course/pinnacle%C2%B3-advanced-planning-education"
html = requests.get(url)
soup = BeautifulSoup(html.text, 'html.parser')
price = soup.select_one("[class*='field-price'] .even").text
print(price)
Output:
5141.00
Actually the class I see, using the Firefox inspector, is field-item even; that's where the text is:
<div class="field-items"><div class="field-item even">5141.00</div></div>
But you need to change your code a little bit:
price = soup.find_all("div",{"class":'field-item even'})[2]
There is more than one element with the "field-item even" class; the price is not the first one.
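To see which index actually holds the price (the [2] above assumes it is the third match), a quick sketch that prints every match can help:
import requests
from bs4 import BeautifulSoup

# Minimal sketch: enumerate every "field-item even" element to locate the price.
url = "https://www.learningconnection.philips.com/en/course/pinnacle%C2%B3-advanced-planning-education"
soup = BeautifulSoup(requests.get(url).text, 'html.parser')

for i, div in enumerate(soup.find_all("div", {"class": "field-item even"})):
    print(i, div.get_text(strip=True))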
I have a number of facebook groups that I would like to get the count of the members of. An example would be this group: https://www.facebook.com/groups/347805588637627/
I have looked at inspect element on the page and it is stored like so:
<span id="count_text">9,413 members</span>
I am trying to get "9,413 members" out of the page. I have tried using BeautifulSoup but cannot work it out.
Thanks
Edit:
from bs4 import BeautifulSoup
import requests
url = "https://www.facebook.com/groups/347805588637627/"
r = requests.get(url)
data = r.text
soup = BeautifulSoup(data, "html.parser")
span = soup.find("span", id="count_text")
print(span.text)
In case there is more than one span tag in the page:
from bs4 import BeautifulSoup
soup = BeautifulSoup(your_html_input, 'html.parser')
span = soup.find("span", id="count_text")
span.text
You can use the text attribute of the parsed span:
>>> from bs4 import BeautifulSoup
>>> soup = BeautifulSoup('<span id="count_text">9,413 members</span>', 'html.parser')
>>> soup.span
<span id="count_text">9,413 members</span>
>>> soup.span.text
'9,413 members'
If you have more than one span tag, you can try this:
from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'html.parser')
tags = soup('span')
for tag in tags:
    print(tag.contents[0])
Facebook uses JavaScript to prevent bots from scraping, so the member count is not in the HTML that requests receives. You need to use Selenium to extract the data in Python.
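A minimal Selenium sketch, assuming a local Chrome setup and noting that Facebook may require login and its markup (including the count_text id) can change at any time:
from selenium import webdriver
from selenium.webdriver.common.by import By

# Minimal sketch: load the page in a real browser so the JavaScript runs,
# then read the member-count span. Assumes chromedriver is available;
# Facebook may still require login or serve different markup.
driver = webdriver.Chrome()
try:
    driver.get("https://www.facebook.com/groups/347805588637627/")
    count = driver.find_element(By.ID, "count_text")
    print(count.text)  # e.g. "9,413 members"
finally:
    driver.quit()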