Below is my Python code and its output. I want the output as rows and columns in a DataFrame:
response = requests.get(source_data)
soup = BeautifulSoup(response.text, "html.parser")
States = soup.find_all('div', class_='card bg-darker p-3 mb-3')

for item in States:
    state_name = item.find(class_='fw-bold fs-5 mb-2').text
    vaccinated_per = item.find(class_='col-3 text-end fs-5 ff-s text-success').text
    print(state_name, vaccinated_per)
Output:
Flanders 80.24%
Wallonia 70.00%
Brussels 56.73%
Ostbelgien 65.11%
Collect your information in a list of dicts and then simply create a DataFrame from it:
data = []
for item in States:
    data.append({
        'state_name': item.find(class_='fw-bold fs-5 mb-2').text,
        'vaccinated_per': item.find(class_='col-3 text-end fs-5 ff-s text-success').text
    })

pd.DataFrame(data)
Example
from bs4 import BeautifulSoup
import requests
import pandas as pd

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
response = requests.get('https://covid-vaccinatie.be/en', headers=headers)
soup = BeautifulSoup(response.text, "html.parser")
States = soup.find_all('div', class_='card bg-darker p-3 mb-3')

data = []
for item in States:
    data.append({
        'state_name': item.find(class_='fw-bold fs-5 mb-2').text,
        'vaccinated_per': item.find(class_='col-3 text-end fs-5 ff-s text-success').text
    })

pd.DataFrame(data)
Output
state_name vaccinated_per
0 Flanders 80.24%
1 Wallonia 70.00%
2 Brussels 56.73%
3 Ostbelgien 65.11%
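As an optional follow-up (not part of the original answer): the vaccinated_per values are strings like '80.24%'; if you need them as numbers for sorting or math, you can strip the percent sign and cast:
df = pd.DataFrame(data)
# strip the trailing '%' and convert the column to float
df['vaccinated_per'] = df['vaccinated_per'].str.rstrip('%').astype(float)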
With the current code, I can scrape multiple prices, but it doesn't automatically re-scrape them every 2 minutes, which is what I need.
import requests
from bs4 import BeautifulSoup
import time
import pandas as pd

mystocks = ['GOOG', 'META', 'MSFT', 'PLTR', 'TSLA', 'ZS', 'PYPL', 'SHOP', 'TTCF']
stockdata = []

def getData(symbol):
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36'}
    url = f'https://finance.yahoo.com/quote/{symbol}'
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.text, 'html.parser')
    stock = {
        'symbol': symbol,
        'price': soup.find('div', {'class': 'D(ib) Mend(20px)'}).find_all('fin-streamer')[0].text,
    }
    return stock

for item in mystocks:
    stockdata.append(getData(item))

def export_data(stockdata):
    df = pd.DataFrame(stockdata)
    df.to_excel("LETS GO2.xlsx")

if __name__ == '__main__':
    while True:
        getData(item)
        export_data(stockdata)
        time_wait = 2
        print(f'Waiting {time_wait} minutes...')
        time.sleep(time_wait * 60)
Your for-loop is in the wrong place. Put it inside your while True: block to loop over every ticker every two minutes.
EDIT:
import requests
from bs4 import BeautifulSoup
import time
import pandas as pd

mystocks = ['GOOG', 'META', 'MSFT', 'PLTR', 'TSLA', 'ZS', 'PYPL', 'SHOP', 'TTCF']

def getData(symbol):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36'}
    url = f'https://finance.yahoo.com/quote/{symbol}'
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.text, 'html.parser')
    stock = {
        'symbol': symbol,
        'price': soup.find('div', {'class': 'D(ib) Mend(20px)'}).find_all('fin-streamer')[0].text,
    }
    return stock

def export_data(stockdata):
    df = pd.DataFrame(stockdata)
    df.to_excel("LETS GO2.xlsx")

if __name__ == "__main__":
    while True:
        stockdata = []
        for item in mystocks:  # scrape every ticker on each pass of the loop
            print(item)
            stockdata.append(getData(item))
        export_data(stockdata)
        time_wait = 0.1  # interval in minutes; set to 2 for the requested two-minute wait
        print(f'Waiting {time_wait} minutes...')
        time.sleep(time_wait * 60)
import requests
from bs4 import BeautifulSoup
headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5'}
URL = "https://www.amazon.com/TRESemm%C3%A9-Botanique-Shampoo-Nourish-Replenish/dp/B0199WNJE8/ref=sxin_14_pa_sp_search_thematic_sspa?content-id=amzn1.sym.a15c61b7-4b93-404d-bb70-88600dfb718d%3Aamzn1.sym.a15c61b7-4b93-404d-bb70-88600dfb718d&crid=2HG5WSUDCJBMZ&cv_ct_cx=hair%2Btresemme&keywords=hair%2Btresemme&pd_rd_i=B0199WNJE8&pd_rd_r=28d72361-7f35-4b1a-be43-98e7103da70c&pd_rd_w=6UL4P&pd_rd_wg=JtUqB&pf_rd_p=a15c61b7-4b93-404d-bb70-88600dfb718d&pf_rd_r=DFPZNAG391M5JS55R6HP&qid=1660432925&sprefix=hair%2Btresemme%2Caps%2C116&sr=1-3-a73d1c8c-2fd2-4f19-aa41-2df022bcb241-spons&smid=A3DEFW12560V8M&spLa=ZW5jcnlwdGVkUXVhbGlmaWVyPUExQlM3VFpGRVM5Tk8wJmVuY3J5cHRlZElkPUEwNjE5MjQwM01JV0FNN1pOMlRHSSZlbmNyeXB0ZWRBZElkPUEwNTA1MDQyMlQ5RjhRQUxIWEdaUiZ3aWRnZXROYW1lPXNwX3NlYXJjaF90aGVtYXRpYyZhY3Rpb249Y2xpY2tSZWRpcmVjdCZkb05vdExvZ0NsaWNrPXRydWU&th=1"
webpage = requests.get(URL, headers=headers)
soup = BeautifulSoup(webpage.content)
rank = soup.select_one('#detailBulletsWrapper_feature_div span:-soup-contains("Best Seller")').contents[2].get_text().split()[0]
Category = soup.select_one('#detailBulletsWrapper_feature_div span:-soup-contains("Best Seller")').contents[2].get_text().split()[2:6]
Category = ' '.join(Category)
type(rank)
type(Category)
import string
for char in string.punctuation:
    rank = rank.replace(char, '')
print(rank)
print(Category)
I have other URLs similar to this one and I want to loop through them. How can I loop through them and save the results to a CSV file? Here are the links. Thank you very much in advance!
URL = ['https://www.amazon.com/Dove-Intensive-Concentrate-Technology-Protects/dp/B0B1VVXTKL',
'https://www.amazon.com/Dove-Intensive-Concentrate-Conditioner-Technology/dp/B0B1VXFLQ2']
You could use a for-loop to iterate the list:
for url in URL:
    webpage = requests.get(url, headers=headers)
    soup = BeautifulSoup(webpage.content)
Note: Amazon does not want to be scraped, so it is only a question of time before they block you. You may want to add some delay, a rotating proxy, ... (a sketch of a simple delay follows the example below).
Example
import requests
import pandas as pd
from bs4 import BeautifulSoup

headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36',
           'Accept-Language': 'en-US, en;q=0.5'}

URL = ['https://www.amazon.com/Dove-Intensive-Concentrate-Technology-Protects/dp/B0B1VVXTKL',
       'https://www.amazon.com/Dove-Intensive-Concentrate-Conditioner-Technology/dp/B0B1VXFLQ2']

data = []
for url in URL:
    webpage = requests.get(url, headers=headers)
    soup = BeautifulSoup(webpage.content)
    data.append({
        'url': url,
        'rank': soup.select_one('#detailBulletsWrapper_feature_div span:-soup-contains("Best Seller")').contents[2].split()[0][1:],
        'category': soup.select_one('#detailBulletsWrapper_feature_div span:-soup-contains("Best Seller") a').text.split('Top 100 in ')[-1]
    })

pd.DataFrame(data).to_csv('myfile.csv', index=False)
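Picking up the note above about delays, a minimal sketch of slowing the loop down with a randomized pause (the 2-5 second range is an arbitrary choice):
import time
import random

for url in URL:  # URL and headers as defined in the example above
    webpage = requests.get(url, headers=headers)
    soup = BeautifulSoup(webpage.content)
    # ... same extraction as in the example ...
    time.sleep(random.uniform(2, 5))  # pause 2-5 seconds before the next request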
import requests
from bs4 import BeautifulSoup
import pandas as pd

baseurl = 'https://twillmkt.com'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'
}

r = requests.get('https://twillmkt.com/collections/denim')
soup = BeautifulSoup(r.content, 'html.parser')
tra = soup.find_all('div', class_='ProductItem__Wrapper')

productlinks = []
Title = []
Brand = []
Colour = []

for links in tra:
    for link in links.find_all('a', href=True)[1:]:
        comp = baseurl + link['href']
        productlinks.append(comp)

for link in productlinks:
    r = requests.get(link, headers=headers)
    soup = BeautifulSoup(r.content, 'html.parser')
    title = soup.find('h1').text
    Title.append(title)
    price = soup.find('span', class_="money").text
    Brand.append(price)
    Product_Features = soup.find_all('li').text  # note: find_all() returns a list, so .text raises AttributeError here
    Colour.append(Product_Features)

df = pd.DataFrame(
    {"Title": Title, "Price": Brand, "Product_Features": Colour}
)
print(df)
I can scrape the title and price, but it's difficult to scrape this information: size, product features, material, model size, image.
Single Page Link
https://twillmkt.com/products/light-blue-butterfly-print-slim-leg-denim?variant=39498848403534
I'll give you the CSS selectors for the elements you want; you can catch them with soup.select() (a short usage sketch follows the list).
size: select[id^="product-select"] option
product feature: div.ProductMeta__Description.Rte li
material: div.ProductMeta__Description.Rte p:nth-of-type(2)
model size: div.ProductMeta__Description.Rte p:nth-of-type(3)
image: div[id^="Image"] span img
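A minimal sketch of how those selectors could be used with soup.select() / soup.select_one() on the single page link above (the request setup mirrors the question's code; only the selectors come from this answer):
import requests
from bs4 import BeautifulSoup

url = 'https://twillmkt.com/products/light-blue-butterfly-print-slim-leg-denim?variant=39498848403534'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'}
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.content, 'html.parser')

sizes = [o.text.strip() for o in soup.select('select[id^="product-select"] option')]
features = [li.text.strip() for li in soup.select('div.ProductMeta__Description.Rte li')]
material = soup.select_one('div.ProductMeta__Description.Rte p:nth-of-type(2)')    # returns None if missing
model_size = soup.select_one('div.ProductMeta__Description.Rte p:nth-of-type(3)')  # returns None if missing
images = [img.get('src') for img in soup.select('div[id^="Image"] span img')]

print(sizes, material.text.strip() if material else None)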
I'm trying to scrape this website
https://www.merinfo.se/search?d=c&ap=1&emp=0%3A20&rev=0%3A100&who=bygg&bf=1&page=1
I've put a def getQuestions(tag) with {tag} in the who= part of the URL and that works fine. When I try to add def getQuestions(tag, page) with page={page}, it just returns 0 in the terminal, and I really have no clue what could be causing this.
Here is the full code:
import requests
from bs4 import BeautifulSoup

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'}

questionlist = []

def getQuestions(tag, page):
    url = 'https://www.merinfo.se/search?d=c&ap=1&emp=0%3A20&rev=0%3A100&who={bygg}&bf=1&page={page}'
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.text, 'html.parser')
    questions = soup.find_all('div', {'class': 'box-white p-0 mb-4'})
    for item in questions:
        question = {
            'title': item.find('a', {'class': 'link-primary'}).text,
            'link': item.find('a', {'class': 'link-primary'})['href'],
            'nummer': item.find('a', {'class': 'link-body'})['href'],
            'address': item.find('address', {'class': 'mt-2 mb-0'}).text,
            'RegÅr': item.find('div', {'class': 'col text-center'}).text,
        }
        questionlist.append(question)
    return

for x in range(1, 5):
    getQuestions('bygg', x)
    print(len(questionlist))
Any help would be appreciated. Best regards!
Change the string in the url variable to an f-string:
import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36"
}

def getQuestions(tag, page):
    questionlist = []
    url = f"https://www.merinfo.se/search?d=c&ap=1&emp=0%3A20&rev=0%3A100&who={tag}&bf=1&page={page}"
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.text, "html.parser")
    questions = soup.find_all("div", {"class": "box-white p-0 mb-4"})
    for item in questions:
        question = {
            "title": item.find("a", {"class": "link-primary"}).text,
            "link": item.find("a", {"class": "link-primary"})["href"],
            "nummer": item.find("a", {"class": "link-body"})["href"],
            "address": item.find("address", {"class": "mt-2 mb-0"}).text,
            "RegÅr": item.find("div", {"class": "col text-center"}).text,
        }
        questionlist.append(question)
    return questionlist

out = []
for x in range(1, 5):
    out.extend(getQuestions("bygg", x))

print(len(out))
Prints:
80
Try changing your url to this:
url = f'https://www.merinfo.se/search?d=c&ap=1&emp=0%3A20&rev=0%3A100&who={tag}&bf=1&page={page}'
You didn't quite have your f-string set up right: without the f prefix, {tag} and {page} are literal text rather than placeholders.
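A quick illustration of the difference the f prefix makes:
tag, page = 'bygg', 2
print('who={tag}&page={page}')   # -> who={tag}&page={page}  (braces stay literal)
print(f'who={tag}&page={page}')  # -> who=bygg&page=2        (values interpolated)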
I have an issue with collecting all the data on the website. When I run my code it only prints out the first entry, but it should print out every song, artist, and rank. It also doesn't show up in the CSV.
from bs4 import BeautifulSoup
import requests
import csv

my_url = "https://www.billboard.com/charts/hot-100"
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'
}

r = requests.get(my_url)
page_soup = BeautifulSoup(r.content, 'lxml')

filename = "Billboard100.csv"
csv_writer = csv.writer(open(filename, 'w'))

Chart = page_soup.findAll('ol', class_='chart-list__elements')
BB = []

# note: the loop body below is not indented; this is the indentation
# problem the answers point out
for item in Chart:
Song = item.find('span', class_='chart-element__information__song text--truncate color--primary').text.strip()
Artist = item.find('span', class_='chart-element__information__artist text--truncate color--secondary').text.strip()
Rank = item.find('span', class_='chart-element__rank__number').text.strip()
Billboard = {
'Song': Song,
'Artist': Artist,
'Rank': Rank,
}
BB.append(Billboard)
print(BB)

with open("Billboard100.csv", "w", newline="") as infile:
    writer = csv.writer(infile)
In your code, Chart had a length of one. Use Chart = page_soup.find_all('li', {'class': 'chart-list__element display--flex'}) to select all the entries.
from bs4 import BeautifulSoup
import requests
import csv

my_url = "https://www.billboard.com/charts/hot-100"
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'
}

r = requests.get(my_url)
page_soup = BeautifulSoup(r.content, 'lxml')

filename = "Billboard100.csv"
csv_writer = csv.writer(open(filename, 'w'))

Chart = page_soup.find_all('li', {'class': 'chart-list__element display--flex'})
BB = []

for item in Chart:
    Song = item.find('span', class_='chart-element__information__song text--truncate color--primary').text.strip()
    Artist = item.find('span', class_='chart-element__information__artist text--truncate color--secondary').text.strip()
    Rank = item.find('span', class_='chart-element__rank__number').text.strip()
    Billboard = {
        'Song': Song,
        'Artist': Artist,
        'Rank': Rank,
    }
    BB.append(Billboard)

print(BB)

with open("Billboard100.csv", "w", newline="") as infile:
    writer = csv.writer(infile)
    for row in BB:
        writer.writerow([row])
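One note on the CSV step: writer.writerow([row]) writes each whole dict into a single column. If you want separate Song/Artist/Rank columns, csv.DictWriter fits the list of dicts directly (a sketch, assuming BB as built above):
import csv

with open("Billboard100.csv", "w", newline="") as outfile:
    writer = csv.DictWriter(outfile, fieldnames=['Song', 'Artist', 'Rank'])
    writer.writeheader()  # header row: Song,Artist,Rank
    writer.writerows(BB)  # one row per chart entry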
Your code isn't well indented. In Python, always check your indentation if some code doesn't run at all (see the official docs on indentation).
for item in Chart:
Song = item.find('span', class_='chart-element__information__song text--truncate color--primary').text.strip()
Artist = item.find('span', class_='chart-element__information__artist text--truncate color--secondary').text.strip()
Rank = item.find('span', class_='chart-element__rank__number').text.strip()
Billboard = {
should be
for item in Chart:
    Song = item.find('span', class_='chart-element__information__song text--truncate color--primary').text.strip()
    Artist = item.find('span', class_='chart-element__information__artist text--truncate color--secondary').text.strip()
    Rank = item.find('span', class_='chart-element__rank__number').text.strip()
    Billboard = {