I need help with a problem. I am writing some code to get the content of a tag. How can I get the content of a tag when it has an id?
from bs4 import BeautifulSoup
import urllib2
code = '<span class="vi-is1-prcp" id="v4-27"> 15,00 EUR </span>'
soup = BeautifulSoup(code)
price = soup.find('a', id='v4-27') # <-- PROBLEM
print price
If that is the HTML, then you should replace the 'a' tag with a 'span' tag. It should look something like this...
...
price = soup.find('span', id="v4-27")
print price  # optional: price.string will give you just the 15,00 EUR
             # instead of the entire html line
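A minimal, self-contained version of that fix (a sketch; html.parser stands in for whichever parser you have installed):
from bs4 import BeautifulSoup

code = '<span class="vi-is1-prcp" id="v4-27"> 15,00 EUR </span>'
soup = BeautifulSoup(code, 'html.parser')

# Match the real tag name; searching by id alone would also work.
price = soup.find('span', id='v4-27')
print(price.string.strip())                  # -> 15,00 EUR
print(soup.find(id='v4-27').string.strip())  # same element, found by id only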
I'm trying to scrape Rotten Tomatoes with bs4.
My aim is to collect all the <a> hrefs from the table, but I cannot do it. Can you help me?
https://www.rottentomatoes.com/top/bestofrt/top_100_action__adventure_movies/
My code is:
from urllib import request
from bs4 import BeautifulSoup as BS
import re
import pandas as pd
url = 'https://www.rottentomatoes.com/top/bestofrt'
html = request.urlopen(url)
bs = BS(html.read(), 'html.parser')
tags = bs.find_all('a', {'class':'articleLink unstyled'})[7:]
links = ['https://www.rottentomatoes.com' + tag['href'] for tag in tags]
########################################### links ############################################################################
webpages = []
for link in reversed(links):
    print(link)
    html = request.urlopen(link)
    bs = BS(html.read(), 'html.parser')
    tags = bs.find_all('a', {'class':'unstyled articleLink'})[43:]
    links = ['https://www.rottentomatoes.com' + tag['href'] for tag in tags]
    webpages.extend(links)
print(webpages)
I put the limit of 43 there to skip the useless links and keep only the movie links, but it is a short-term solution and does not really help.
I need an exact solution for scraping the table without picking up irrelevant information.
Thanks.
Just grab the main table and then extract all the <a> tags.
For example:
import requests
from bs4 import BeautifulSoup
rotten_tomatoes_url = 'https://www.rottentomatoes.com/top/bestofrt/top_100_action__adventure_movies/'
action_and_adventure = [
    f"https://www.rottentomatoes.com{link.get('href')}"
    for link in
    BeautifulSoup(
        requests.get(rotten_tomatoes_url).text,
        "lxml",
    )
    .find("table", class_="table")
    .find_all("a")
]
print(len(action_and_adventure))
print("\n".join(action_and_adventure[:10]))
Output (all 100 links to movies):
100
https://www.rottentomatoes.com/m/black_panther_2018
https://www.rottentomatoes.com/m/avengers_endgame
https://www.rottentomatoes.com/m/mission_impossible_fallout
https://www.rottentomatoes.com/m/mad_max_fury_road
https://www.rottentomatoes.com/m/spider_man_into_the_spider_verse
https://www.rottentomatoes.com/m/wonder_woman_2017
https://www.rottentomatoes.com/m/logan_2017
https://www.rottentomatoes.com/m/coco_2017
https://www.rottentomatoes.com/m/dunkirk_2017
https://www.rottentomatoes.com/m/star_wars_the_last_jedi
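The same table-first idea drops into the original loop as well (a sketch, untested against the live site; it assumes each category page uses the same table markup):
webpages = []
for link in reversed(links):
    bs = BS(request.urlopen(link).read(), 'html.parser')
    # Restrict the search to the main movie table instead of slicing by index.
    table = bs.find('table', class_='table')
    if table is None:
        continue
    webpages.extend('https://www.rottentomatoes.com' + tag['href']
                    for tag in table.find_all('a', href=True))
print(len(webpages))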
try this:
tags = bs.find_all('a', {'class': 'unstyled articleLink'})[43:]
(The original find_all(name='a', {...}) is a SyntaxError: a positional argument cannot follow a keyword argument. Pass the tag name positionally, or use attrs= for the dictionary.)
content = soup.find_all("div", id="ctl00_ContentPlaceHolder1_ctl03_divHO")
for book in content:
    stock = book.find('I', {'class': "Item_Price10"}).text
    print(stock)
I would like to get the stock price by using BS4 to find the value inside content, but the code does not work. Please help me; thank you in advance.
The Item_price10 class appears to be part of a <td> tag, so you could try something like:
import requests
from bs4 import BeautifulSoup
req = requests.get("https://s.cafef.vn/Lich-su-giao-dich-FPT-1.chn")
soup = BeautifulSoup(req.content, "html.parser")
for div in soup.find_all("div", id="ctl00_ContentPlaceHolder1_ctl03_divHO"):
    for tr in div.find_all('tr'):
        for td in tr.find_all('td', {'class': "Item_Price10"}):
            print(td.text)
        print()
This would display something starting like:
95.70
95.70
3,325,700
319,480,000,000
1,243,600
95.50
97.00
95.40
0
0
0
94.70
94.70
...
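With bs4 4.7+ the same cells can also be matched in a single CSS-selector pass (a sketch, assuming the page structure shown above):
import requests
from bs4 import BeautifulSoup

req = requests.get("https://s.cafef.vn/Lich-su-giao-dich-FPT-1.chn")
soup = BeautifulSoup(req.content, "html.parser")

# One selector: every td.Item_Price10 inside the target div.
for td in soup.select("div#ctl00_ContentPlaceHolder1_ctl03_divHO td.Item_Price10"):
    print(td.text)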
from bs4 import BeautifulSoup
import requests

def kijiji():
    source = requests.get('https://www.kijiji.ca/b-mens-shoes/markham-york-region/c15117001l1700274').text
    soup = BeautifulSoup(source, 'lxml')
    b = soup.find('div', class_='price')
    for link in soup.find_all('a', class_='title'):
        a = link.get('href')
        fulllink = 'http://kijiji.ca' + a
        print(fulllink)
        b = soup.find('div', class_='price')
        print(b.prettify())

kijiji()
The goal of this is to list all the different kinds of items sold on Kijiji and pair each of them up with a price.
But I can't find any way to advance what Beautiful Soup finds for the price class, so I'm stuck with the first price. find_all doesn't work either, as it just prints out the whole blob instead of grouping each price with its item.
If you have Beautiful Soup 4.7.1 or above, you can use the following CSS selector with select(), which is much faster.
code:
import requests
from bs4 import BeautifulSoup
res = requests.get("https://www.kijiji.ca/b-mens-shoes/markham-york-region/c15117001l1700274").text
soup = BeautifulSoup(res, 'html.parser')
for item in soup.select('.info-container'):
    fulllink = 'http://kijiji.ca' + item.find_next('a', class_='title')['href']
    print(fulllink)
    price = item.select_one('.price').text.strip()
    print(price)
Or, to use find_all(), use the code block below:
import requests
from bs4 import BeautifulSoup
res = requests.get("https://www.kijiji.ca/b-mens-shoes/markham-york-region/c15117001l1700274").text
soup = BeautifulSoup(res, 'html.parser')
for item in soup.find_all('div', class_='info-container'):
    fulllink = 'http://kijiji.ca' + item.find_next('a', class_='title')['href']
    print(fulllink)
    price = item.find_next(class_='price').text.strip()
    print(price)
Congratulations on finding the answer. I'll give you another solution for reference only.
import requests
from simplified_scrapy.simplified_doc import SimplifiedDoc

def kijiji():
    url = 'https://www.kijiji.ca/b-mens-shoes/markham-york-region/c15117001l1700274'
    source = requests.get(url).text
    doc = SimplifiedDoc(source)
    infos = doc.getElements('div', attr='class', value='info-container')
    for info in infos:
        price = info.select('div.price>text()')
        a = info.select('a.title')
        link = doc.absoluteUrl(url, a.href)
        title = a.text
        print(price)
        print(link)
        print(title)

kijiji()
Result:
$310.00
https://www.kijiji.ca/v-mens-shoes/markham-york-region/jordan-4-oreo-2015/1485391828
Jordan 4 Oreo (2015)
$560.00
https://www.kijiji.ca/v-mens-shoes/markham-york-region/yeezy-boost-350-yecheil-reflectives/1486296645
Yeezy Boost 350 Yecheil Reflectives
...
Here are more examples: https://github.com/yiyedata/simplified-scrapy-demo/tree/master/doc_examples
from bs4 import BeautifulSoup
import requests

def kijiji():
    source = requests.get('https://www.kijiji.ca/b-mens-shoes/markham-york-region/c15117001l1700274').text
    soup = BeautifulSoup(source, 'lxml')
    b = soup.find('div', class_='price')
    for link in soup.find_all('a', class_='title'):
        a = link.get('href')
        fulllink = 'http://kijiji.ca' + a
        print(fulllink)
        print(b.prettify())
        b = b.find_next('div', class_='price')

kijiji()
I was stuck on this for an hour; as soon as I posted it on Stack Overflow, I came up with an idea. Messy code, but it works!
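For reference, a tidier variant of the same idea (a sketch, untested against the live page; it assumes the title links and price divs come back in matching order):
from bs4 import BeautifulSoup
import requests

source = requests.get('https://www.kijiji.ca/b-mens-shoes/markham-york-region/c15117001l1700274').text
soup = BeautifulSoup(source, 'lxml')

# Walk titles and prices in lockstep instead of advancing a cursor by hand.
titles = soup.find_all('a', class_='title')
prices = soup.find_all('div', class_='price')
for link, price in zip(titles, prices):
    print('http://kijiji.ca' + link.get('href'))
    print(price.text.strip())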
I want to extract the description near the figure (the one that goes from "Figurine model" to "Stay Tuned :)") and store it in the variable information through BeautifulSoup. How can I do it?
Here's my code, but I don't know how to continue it:
import requests
from bs4 import BeautifulSoup

response = requests.get('https://www.myminifactory.com/object/3d-print-the-little-prince-4707')
soup = BeautifulSoup(response.text, "lxml")
information =
Below is the page from which I want to extract the object's description. Thank you in advance!
This works for me. I'm not proud of the script because of the way I used the break statement, but it works.
from urllib.request import urlopen
from bs4 import BeautifulSoup as BS

url = r'https://www.myminifactory.com/object/3d-print-the-little-prince-4707'
html = urlopen(url).read()
Soup = BS(html, "lxml")
Desc = Soup.find('div', {'class': 'short-text text-auto-link'}).text
description = ''
for line in Desc.split('\n'):
    if line.strip() == '_________________________________________________________________________':
        break
    if line.strip():
        description += line.strip()
print(description)
Find the parent tag, then look for the <p> tags, filtering out the blank ones and the ________ divider:
parent = soup.find("div",class_="row container-info-obj margin-t-10")
result = [" ".join(p.text.split()) for p in parent.find_all("p") if p.text.strip() and not "_"*8 in p.text]
#youtube_v = parent.find("iframe")["src"]
print(result)
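To end up with the single information string the question asked for, the cleaned paragraphs can simply be joined (building on the result list above):
information = "\n".join(result)
print(information)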
My problem is with parsing a website and then navigating the data tree with BS. How can I get the content of an <em> tag? I tried

for first in soup.find_all("li", class_="li-in"):
    print first.select("em.fl.in-date").string
    # or
    print first.select("em.fl.in-date").contents

but it doesn't work. Please help.
I am searching for cars on tutti.ch
Here is my entire code:
# Crawl tutti.ch
import urllib
thisurl = "http://www.tutti.ch/stgallen/fahrzeuge/autos"
handle = urllib.urlopen(thisurl)
html_gunk = handle.read()
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_gunk, 'html.parser')
for first in soup.find_all("li", class_="li-in"):
    if first.a.string and "Audi" and "BMW" in first.a.string:
        print "Geschafft: %s" % first.a.contents
        print first.select("em.fl.in-date").string
    else:
        print first.a.contents
When it finds a BMW or an Audi, it should check when the car was listed. The time is located in an <em> tag like this:
<em class="fl in-date">
Heute
<br></br>
13:59
</em>
first.select("em.fl.in-date").text
Assuming your selector is correct. You didn't provide which URL you're scraping, so I can't be sure.
>>> url = "http://stackoverflow.com/questions/38187213/python-beautifulsoup"
>>> from bs4 import BeautifulSoup
>>> import urllib2
>>> html = urllib2.urlopen(url).read()
>>> soup = BeautifulSoup(html)
>>> soup.find_all("p")[0].text
u'My problem is when parsing a website and then loading the data tree with BS. How can I look for the content of an <em> Tag? I tried '
After seeing your code, I made the following change; take a look:
# Crawl tutti.ch
import urllib
thisurl = "http://www.tutti.ch/stgallen/fahrzeuge/autos"
handle = urllib.urlopen(thisurl)
html_gunk = handle.read()
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_gunk, 'html.parser')
for first in soup.find_all("li", class_="li-in"):
    if first.a.string and "Audi" and "BMW" in first.a.string:
        print "Geschafft: %s" % first.a.contents
        print first.select("em.fl.in-date")[0].text
    else:
        print first.a.contents
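As a side note, newer Beautiful Soup versions also offer select_one(), which returns the first match or None and avoids the bare [0] index (a sketch on the same loop):
for first in soup.find_all("li", class_="li-in"):
    em = first.select_one("em.fl.in-date")
    if em is not None:
        # get_text(" ", strip=True) flattens the <br/> into a space: "Heute 13:59"
        print(em.get_text(" ", strip=True))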