Web scraping tables from multiple pages using BeautifulSoup - Python

I am trying to scrape a table from multiple pages for different weeks, but I keep getting the results from this URL: https://www.boxofficemojo.com/weekly/2018W52/. Here's the code I am using:
import requests
from requests import get
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from time import sleep
from random import randint
import re
pages = np.arange(2015,2016)
week = ['01','02','03','04','05','06','07','08','09']
week1 = np.arange(10,11)
for x in week1:
    week.append(x)
week
mov = soup.find_all("table", attrs={"class": "a-bordered"})
print("Number of tables on site: ", len(mov))

all_rows = []
for page in pages:
    for x in week:
        url = requests.get('https://www.boxofficemojo.com/weekly/' + str(page) + 'W' + str(x) + '/')
        soup = BeautifulSoup(url.text, 'lxml')
        mov = soup.find_all("table", attrs={"class": "a-bordered"})
        table1 = mov[0]
        body = table1.find_all("tr")
        head = body[0]
        body_rows = body[1:]
        sleep(randint(2, 10))
        for row_num in range(len(body_rows)):
            row = []
            for row_item in body_rows[row_num].find_all("td"):
                aa = re.sub("(\xa0)|(\n)|,", "", row_item.text)
                row.append(aa)
            all_rows.append(row)
        print('Page', page, x)

Assuming you want 52 weeks from each year, why not generate the links in advance, then use pandas to retrieve each table, build a list of DataFrames, and concatenate those into a final DataFrame?
import pandas as pd

def get_table(url):
    year = int(url[37:41])
    week_yr = int(url[42:44])
    df = pd.read_html(url)[0]
    df['year'] = year
    df['week_yr'] = week_yr
    return df

years = ['2015', '2016']
weeks = [str(i).zfill(2) for i in range(1, 53)]
base = 'https://www.boxofficemojo.com/weekly'
urls = [f'{base}/{year}W{week}' for week in weeks for year in years]
results = pd.concat([get_table(url) for url in urls])
You might then look at ways of speeding things up, e.g. with multiprocessing:
from multiprocessing import Pool, cpu_count
import pandas as pd

def get_table(url):
    year = int(url[37:41])
    week_yr = int(url[42:44])
    df = pd.read_html(url)[0]
    df['year'] = year
    df['week_yr'] = week_yr
    return df

if __name__ == '__main__':
    years = ['2015', '2016']
    weeks = [str(i).zfill(2) for i in range(1, 53)]
    base = 'https://www.boxofficemojo.com/weekly'
    urls = [f'{base}/{year}W{week}' for week in weeks for year in years]

    with Pool(cpu_count() - 1) as p:
        results = p.map(get_table, urls)
    final = pd.concat(results)
    print(final)
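If the hard-coded string offsets (url[37:41], url[42:44]) feel brittle, one alternative (a sketch, not part of the original answer) is to pull the year and week out of the URL with a regex:

import re
import pandas as pd

def get_table(url):
    # e.g. 'https://www.boxofficemojo.com/weekly/2015W07' -> year 2015, week 7
    year, week_yr = map(int, re.search(r'/(\d{4})W(\d{2})/?$', url).groups())
    df = pd.read_html(url)[0]
    df['year'] = year
    df['week_yr'] = week_yr
    return df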

Related

Comparison between 2 columns in Pandas and printing only those rows that satisfy the condition

This is my code; I want to compare the goals and xG columns and print only those rows which satisfy the condition goals > xG.
import json
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from urllib.request import urlopen
pd.set_option("max_colwidth", 99999)
pd.set_option("max_rows", 99999)
url = "https://understat.com/league/EPL"
page_connect = urlopen(url)
page_html = BeautifulSoup(page_connect, "html.parser")
raw_string = page_html.findAll(name="script")[3].text
start_ind = raw_string.index("\\")
stop_ind = raw_string.index("')")
json_data = raw_string[start_ind:stop_ind]
json_data = json_data.encode("utf8").decode("unicode_escape")
final_json_df = pd.json_normalize(json.loads(json_data))
a = final_json_df[final_json_df.shots == 0]
final_json_df = final_json_df.astype({"goals" : 'float'})
final_json_df = final_json_df.astype({"xG" : 'float'})
I tried this:
final_json_df[final_json_df.goals>xG]
but it doesn't seem to work. It would be helpful if someone could give the solution and explain why final_json_df[final_json_df.goals>xG] doesn't work.
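A bare xG inside the brackets is an ordinary Python name rather than a DataFrame column, so it raises NameError: name 'xG' is not defined. A minimal sketch of the comparison, referencing both columns on the DataFrame (assuming the dtype conversions above have already run):

# Rows where goals exceed xG, comparing the two columns element-wise
better_than_expected = final_json_df[final_json_df.goals > final_json_df.xG]
print(better_than_expected)

# Equivalent, and safer if a column name ever clashes with a DataFrame attribute
print(final_json_df[final_json_df['goals'] > final_json_df['xG']])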

For Loop only prints the first value

I am trying to web scrape stock data using a for loop over a list of five stocks. The problem is that only the first value is returned, five times. I have tried appending to a list, but it still doesn't work, although clearly I am not appending correctly. On the website, I want to get the Operating Cash figure, which comes in a form like 14B or 1B, which is why I remove the B and multiply to get a raw value. Here is my code:
import requests
import yfinance as yf
import pandas as pd
from bs4 import BeautifulSoup
headers = {'User Agent':'Mozilla/5.0'}
stocks = ['AMC','AMD','PFE','AAPL', 'NVDA']
finished_list = []
for stock in stocks:
    url = f'https://www.marketwatch.com/investing/stock/{stock}/financials/cash-flow'
    res = requests.get(url)
    soup = BeautifulSoup(res.content, 'lxml')
    operating_cash = soup.findAll('div', class_="cell__content")[134].text
    finished_list.append(operating_cash)
    if 'B' in operating_cash:
        cash1 = operating_cash.replace('B', '')
        if '(' in cash1:
            cash2 = cash1.replace('(', '-')
            if ')' in cash2:
                cash3 = cash2.replace(')', '')
                cash3 = float(cash3)
                print(cash3 * 1000000000)
        else:
            cash1 = float(cash1)
            print(cash1 * 1000000000)
The current output is -1060000000.0 five times in a row, which is the correct operating cash value for AMC but not for the other four. Thanks in advance to anyone who can help me out.
You don't need to use if conditions for str.replace(). Instead, do all your replacements in one line like so:
for stock in stocks:
    url = f'https://www.marketwatch.com/investing/stock/{stock}/financials/cash-flow'
    res = requests.get(url)
    soup = BeautifulSoup(res.content, 'lxml')
    operating_cash = soup.findAll('div', class_="cell__content")[134].text
    finished_list.append(operating_cash)
    cash = float(operating_cash.replace('B', '').replace('(', '-').replace(')', ''))
    print(cash * 1000000000)
-1060000000.0
1070000000.0000001
14400000000.0
80670000000.0
5820000000.0
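If some tickers ever report the figure with a different suffix such as M for millions (an assumption, the question only shows B values), a small helper keeps the conversion in one place:

def to_dollars(cell):
    """Convert a cell like '14.4B', '(1.06B)' or '900M' to a float (B/M suffixes assumed)."""
    multipliers = {'B': 1_000_000_000, 'M': 1_000_000}
    cell = cell.replace('(', '-').replace(')', '')
    if cell[-1] in multipliers:
        return float(cell[:-1]) * multipliers[cell[-1]]
    return float(cell)

print(to_dollars('(1.06B)'))  # -1060000000.0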

Python HTML Parser (Unnamed Level)

I am working on a screen scraper to pull football statistics from www.pro-football-reference.com. I'm currently scraping from the main players' stats page and then diving into each player's individual page with their statistics by year.
I was able to implement this process successfully for my first set of players (quarterbacks, using the Passing table). However, when I attempted to re-create the process for running back data, I am receiving an additional column in my data frame with values like "Unnamed: x_level_0". This is my first experience with HTML data, so I'm not sure what piece I missed; I just assumed it would be the same code as for the quarterbacks.
Below is the QB Code sample and the correct dataframe:
import requests
import urllib.request
import time
from bs4 import BeautifulSoup
import pandas as pd
from pandas import DataFrame
import lxml
import re
import csv
p = 1
url = 'https://www.pro-football-reference.com'
year = 2020
maxp = 300
#Passing Data
r = requests.get(url+ '/years/' + str(year) + '/passing.htm')
soup = BeautifulSoup(r.content, 'html.parser')
parsed_table = soup.find_all('table')[0]
results = soup.find(id='div_passing')
job_elems = results.find_all('tr')
df = []
LastNameList = []
FirstNameList = []
for i, row in enumerate(parsed_table.find_all('tr')[2:]):
    dat = row.find('td', attrs={'data-stat': 'player'})
    if dat != None:
        name = dat.a.get_text()
        print(name)
        stub = dat.a.get('href')
        #pos = row.find('td', attrs={'data-stat': 'fantasy_pos'}).get_text()
        #print(pos)
        # grab this player's stats
        tdf = pd.read_html(url + stub)[1]
        for k, v in tdf.iterrows():
            # Scrape 2020 stats, if no 2020 stats move on
            try:
                FindYear = re.search(".*2020.*", v['Year'])
                if FindYear:
                    # If Year for stats is current year, append data to dataframe
                    # get Name data
                    fullName = row.find('td', {'class': 'left'})['csk']
                    findComma = fullName.find(',', 0, len(fullName))
                    lName = fullName[0:findComma]
                    fName = fullName[findComma + 1:len(fullName)]
                    LastNameList.append(lName)
                    FirstNameList.append(fName)
                    # get basic stats
                    df.append(v)
            except:
                pass
This output looks like the following:
Philip Rivers
Year 2020
Age 39
Tm IND
Pos qb
No. 17
G 1
GS 1
Below is the RB Code sample and the incorrect dataframe:
import requests
import urllib.request
import time
from bs4 import BeautifulSoup
import pandas as pd
from pandas import DataFrame
import lxml
import re
import csv
p = 1
url = 'https://www.pro-football-reference.com'
year = 2020
maxp = 300
#Rushing Data
r = requests.get(url+ '/years/' + str(year) + '/rushing.htm')
soup = BeautifulSoup(r.content, 'html.parser')
parsed_table = soup.find_all('table')[0]
results = soup.find(id='div_rushing')
job_elems = results.find_all('tr')
df = []
LastNameList = []
FirstNameList = []
for i, row in enumerate(parsed_table.find_all('tr')[2:]):
    dat = row.find('td', attrs={'data-stat': 'player'})
    if dat != None:
        name = dat.a.get_text()
        print(name)
        stub = dat.a.get('href')
        print(stub)
        #pos = row.find('td', attrs={'data-stat': 'fantasy_pos'}).get_text()
        #print(pos)
        # grab this player's stats
        tdf = pd.read_html(url + stub)[1]
        for k, v in tdf.iterrows():
            print(v)
            # Scrape 2020 stats, if no 2020 stats move on
            try:
                FindYear = re.search(".*2020.*", v['Year'])
                print('found 2020')
                if FindYear:
                    # If Year for stats is current year, append data to dataframe
                    # get Name data
                    fullName = row.find('td', {'class': 'left'})['csk']
                    findComma = fullName.find(',', 0, len(fullName))
                    lName = fullName[0:findComma]
                    fName = fullName[findComma + 1:len(fullName)]
                    LastNameList.append(lName)
                    FirstNameList.append(fName)
                    # get basic stats
                    df.append(v)
            except:
                pass
This output looks like the following:
Unnamed: 0_level_0 Year 2020
Unnamed: 1_level_0 Age 26
Unnamed: 2_level_0 Tm TEN
Unnamed: 3_level_0 Pos rb
Unnamed: 4_level_0 No. 22
Games G 1
GS 1
Rushing Rush 31
Yds 116
TD 0
An example URL where this data is pulled from is: https://www.pro-football-reference.com/players/J/JacoJo01.htm
And it is pulling the Rushing & Receiving table. Is there something additional I need to look out for when parsing this HTML?
I attempted to add index_col = 1 to my tdf = pd.read_html(url + stub)[1] call. However, that just grouped the two values into one column.
Any input on this would be greatly appreciated. If I can provide any further information, please let me know.
Thank you
You can try this code to parse the Passing table for each player (for now I get the players from https://www.pro-football-reference.com/years/2020/passing.htm, but you can pass any player URL to it):
import requests
import pandas as pd
from bs4 import BeautifulSoup

def scrape_player(player_name, player_url, year="2020"):
    out = []
    soup = BeautifulSoup(requests.get(player_url).content, 'html.parser')
    row = soup.select_one('table#passing tr:has(th:contains("{}"))'.format(year))
    if row:
        tds = [player_name] + [t.text for t in row.select('th, td')]
        headers = ['Name'] + [th.text for th in row.find_previous('thead').select('th')]
        out.append(dict(zip(headers, tds)))
    return out

url = 'https://www.pro-football-reference.com/years/2020/passing.htm'
all_data = []

soup = BeautifulSoup(requests.get(url).content, 'html.parser')
for player in soup.select('table#passing [data-stat="player"] a'):
    print(player.text)
    for data in scrape_player(player.text, 'https://www.pro-football-reference.com' + player['href']):
        all_data.append(data)

df = pd.DataFrame(all_data)
df.to_csv('data.csv')
print(df)
This creates a CSV (data.csv) of the results.
EDIT: To parse the Rushing & Receiving table, you can use this script:
import requests
import pandas as pd
from bs4 import BeautifulSoup, Comment

def scrape_player(player_name, player_url, year="2020"):
    out = []
    soup = BeautifulSoup(requests.get(player_url).content, 'html.parser')
    # the Rushing & Receiving table lives inside an HTML comment, so re-parse that comment
    soup = BeautifulSoup(soup.select_one('#rushing_and_receiving_link').find_next(text=lambda t: isinstance(t, Comment)), 'html.parser')
    row = soup.select_one('table#rushing_and_receiving tr:has(th:contains("{}"))'.format(year))
    if row:
        tds = [player_name] + [t.text for t in row.select('th, td')]
        headers = ['Name'] + [th.text for th in row.find_previous('thead').select('tr')[-1].select('th')]
        out.append(dict(zip(headers, tds)))
    return out

url = 'https://www.pro-football-reference.com/years/2020/passing.htm'
all_data = []

soup = BeautifulSoup(requests.get(url).content, 'html.parser')
for player in soup.select('table#passing [data-stat="player"] a'):
    print(player.text)
    for data in scrape_player(player.text, 'https://www.pro-football-reference.com' + player['href']):
        all_data.append(data)

df = pd.DataFrame(all_data)
df.to_csv('data.csv')
print(df)
This again creates a CSV (data.csv) of the results.
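As an aside on the original question: the "Unnamed: x_level_0" labels almost certainly come from the two-row header on the Rushing & Receiving table, which pd.read_html returns as a MultiIndex; blank cells in the top header row are filled in as "Unnamed: N_level_0". If you would rather stay with pd.read_html, a sketch that flattens that header (assuming the same table index 1 used in the question):

import pandas as pd

url = 'https://www.pro-football-reference.com/players/J/JacoJo01.htm'
tdf = pd.read_html(url)[1]  # Rushing & Receiving table, as in the question

# Join the two header levels, dropping the placeholder 'Unnamed: ...' top level
if isinstance(tdf.columns, pd.MultiIndex):
    tdf.columns = [bottom if top.startswith('Unnamed') else f'{top} {bottom}'
                   for top, bottom in tdf.columns]

print(tdf.head())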

DataFrame and Lists returning empty after re-running script

I am currently trying to scrape data from 1001TrackLists, a website that lists tracks in DJ mixes, using BeautifulSoup.
I wrote a script to collect all the track information and create a dataframe, which worked perfectly when I first finished it and returned the dataframe as expected. However, when I closed my Jupyter notebook and restarted Python, the script returns a blank dataframe containing only the column headers. The lists I build in the for loops, which I use to construct the dataframe, are also blank.
I've tried restarting my kernel, restarting/clearing output, and restarting my computer - nothing seems to work.
Here's my code so far:
from bs4 import BeautifulSoup as bs
import requests
import pandas as pd
import numpy as np
import re
import urllib.request
import matplotlib.pyplot as plt
url_list = ['https://www.1001tracklists.com/tracklist/yj03rk/joy-orbison-resident-advisor-podcast-331-2012-10-01.html', 'https://www.1001tracklists.com/tracklist/50khrzt/joy-orbison-greenmoney-radio-2009-08-16.html', 'https://www.1001tracklists.com/tracklist/7mzt0y9/boddika-joy-orbison-rinse-fm-hessle-audio-cover-show-2014-01-16.html', 'https://www.1001tracklists.com/tracklist/6l8q8l9/joy-orbison-bbc-radio-1-essential-mix-2014-07-26.html', 'https://www.1001tracklists.com/tracklist/5y6fl1k/kerri-chandler-joy-orbison-ben-ufo-bbc-radio-1-essential-mix-07-18-live-from-lovebox-festival-2015-07-24.html', 'https://www.1001tracklists.com/tracklist/1p6g9u49/joy-orbison-andrew-lyster-nts-radio-2016-07-23.html', 'https://www.1001tracklists.com/tracklist/qgz18zk/joy-orbison-dekmantel-podcast-081-2016-08-01.html', 'https://www.1001tracklists.com/tracklist/26wlts2k/george-fitzgerald-joy-orbison-bbc-radio-1-residency-2016-11-03.html', 'https://www.1001tracklists.com/tracklist/t9gkru9/james-blake-joy-orbison-bbc-radio-1-residency-2018-02-22.html', 'https://www.1001tracklists.com/tracklist/2gfzrxw1/joy-orbison-felix-hall-nts-radio-2019-08-23.html']
djnames = []
tracknumbers = []
tracknames = []
artistnames = []
mixnames = []
dates = []
url_scrape = []
for url in url_list:
    count = 0
    headers = {'User-Agent': 'Chrome/51.0.2704.103'}
    page_link = url
    page_response = requests.get(page_link, headers=headers)
    soup = bs(page_response.content, "html.parser")
    title = (page_link[48:-15])
    title = title.replace('-', ' ')
    title = (title[:-1])
    title = title.title()
    date = (page_link[-15:-5])
    tracknames_scrape = soup.find_all("div", class_="tlToogleData")
    artistnames_scrape = soup.find_all("meta", itemprop="byArtist")
    for (i, track) in enumerate(tracknames_scrape):
        if track.meta:
            trackname = track.meta['content']
            tracknames.append(trackname)
            mixnames.append(title)
            dates.append(date)
            djnames.append('Joy Orbison')
            url_scrape.append(url2)
            count += 1
            tracknumbers.append(count)
        else:
            continue
    for artist in artistnames_scrape:
        artistname = artist["content"]
        artistnames.append(artistname)

df = pd.DataFrame({'DJ Name': djnames, 'Date': dates, 'Mix Name': mixnames, 'Track Number': tracknumbers, 'Track Names': tracknames, 'Artist Names': artistnames, 'URL': url_scrape})
Change the line url_scrape.append(url2) (inside the track loop) to the following and it works:
url_scrape.append(url)
Otherwise you get NameError: name 'url2' is not defined. (It presumably worked in the first session because url2 was still defined in the notebook's namespace from an earlier cell; after restarting the kernel, that name was gone.)
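More generally, one way to avoid this whole class of bug (a sketch, not part of the original answer) is to build one dict per scraped track instead of seven parallel lists, so a single stray name cannot leave the columns out of sync; a toy version with stand-in values:

import pandas as pd

# Stand-ins for the values scraped inside the loop
url = 'https://www.1001tracklists.com/tracklist/yj03rk/joy-orbison-resident-advisor-podcast-331-2012-10-01.html'
title, date = 'Joy Orbison Resident Advisor Podcast 331', '2012-10-01'
scraped_tracks = ['Track A', 'Track B']

rows = [{'DJ Name': 'Joy Orbison', 'Date': date, 'Mix Name': title,
         'Track Number': i + 1, 'Track Names': name, 'URL': url}
        for i, name in enumerate(scraped_tracks)]
df = pd.DataFrame(rows)
print(df)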

How do I create a dataframe of jobs and companies that includes hyperlinks?

I am making a function to print a list of links so I can add them to a list of companies and job titles. However, I am having difficulty navigating the tag sub-contents. I am looking to list every 'href' inside an 'a' inside a 'div', like so:
from bs4 import BeautifulSoup
import re
import pandas as pd
import requests
page = "https://www.indeed.com/q-software-developer-l-San-Francisco-jobs.html"
headers = {'User-Agent':'Mozilla/5.0'}
def get_soup():
    session = requests.Session()
    pageTree = session.get(page, headers=headers)
    return BeautifulSoup(pageTree.content, 'html.parser')

pageSoup = get_soup()

def print_links():
    """this function scrapes the job title links"""
    jobLink = [div.a for div in pageSoup.find_all('div', class_='title')]
    for div in jobLink:
        print(div['href'])
I am trying to make a list, but my result is simply text and does not seem to be a usable link, like so:
/pagead/clk?mo=r&ad=-6NYlbfkN0DhVAxkc_TxySVbUOs6bxWYWOfhmDTNcVTjFFBAY1FXZ2RjSBnfHw4gS8ZdlOOq-xx2DHOyKEivyG9C4fWOSDdPgVbQFdESBaF5zEV59bYpeWJ9R8nSuJEszmv8ERYVwxWiRnVrVe6sJXmDYTevCgexdm0WsnEsGomjLSDeJsGsHFLAkovPur-rE7pCorqQMUeSz8p08N_WY8kARDzUa4tPOVSr0rQf5czrxiJ9OU0pwQBfCHLDDGoyUdvhtXy8RlOH7lu3WEU71VtjxbT1vPHPbOZ1DdjkMhhhxq_DptjQdUk_QKcge3Ao7S3VVmPrvkpK0uFlA0tm3f4AuVawEAp4cOUH6jfWSBiGH7G66-bi8UHYIQm1UIiCU48Yd_pe24hfwv5Hc4Gj9QRAAr8ZBytYGa5U8z-2hrv2GaHe8I0wWBaFn_m_J10ikxFbh6splYGOOTfKnoLyt2LcUis-kRGecfvtGd1b8hWz7-xYrYkbvs5fdUJP_hDAFGIdnZHVJUitlhjgKyYDIDMJ-QL4aPUA-QPu-KTB3EKdHqCgQUWvQud4JC2Fd8VXDKig6mQcmHhZEed-6qjx5PYoSifi5wtRDyoSpkkBx39UO3F918tybwIbYQ2TSmgCHzGm32J4Ny7zPt8MPxowRw==&p=0&fvj=1&vjs=3
Additionally, here is my attempt at making a list with the links:
def get_job_titles():
    """this function scrapes the job titles"""
    jobs = []
    jobTitle = pageSoup.find_all('div', class_='title')
    for span in jobTitle:
        link = span.find('href')
        if link:
            jobs.append({'title': link.text,
                         'href': link.attrs['href']})
        else:
            jobs.append({'title': span.text, 'href': None})
    return jobs
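As a side note on that second function: span.find('href') looks for a tag named href, which never exists, so link is always None and every row ends up with 'href': None. A sketch of the same approach with the anchor tag looked up instead (prefixing indeed.com is an assumption about how the relative links resolve):

def get_job_titles():
    """Sketch: pull the <a> inside each title div, then read its href attribute."""
    jobs = []
    for div in pageSoup.find_all('div', class_='title'):
        a = div.find('a')  # find the anchor tag, not a tag named 'href'
        if a and a.get('href'):
            jobs.append({'title': a.get_text(strip=True),
                         'href': 'https://www.indeed.com' + a['href']})
        else:
            jobs.append({'title': div.get_text(strip=True), 'href': None})
    return jobs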
I would regex out the required info from the returned html and construct each job URL from the parameters the page's javascript uses to build them dynamically. Interestingly, the total number of listings is different when using requests than when using a browser. You can manually enter the number of listings, e.g. 6175 (currently), or use the number returned by the request (which is lower, so you miss some results); you could also use selenium to get the correct initial result count. You can then issue requests with offsets to get all listings.
Listings can be randomized in terms of ordering.
It seems you can introduce a limit parameter to increase the results per page up to 50, e.g.
https://www.indeed.com/jobs?q=software+developer&l=San+Francisco&limit=50&start=0
Furthermore, it seems that it is possible to retrieve more results than are actually given as the total results count on the webpage.
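For example, with limit=50 the paging URLs could be generated like this (a sketch; the 6175 total is the manually entered count used in the scripts below):

import math

listings_per_page = 50           # the limit parameter described above
number_of_listings = 6175        # manually entered, as in the scripts below

base = 'https://www.indeed.com/jobs?q=software+developer&l=San+Francisco'
urls = [f'{base}&limit={listings_per_page}&start={page * listings_per_page}'
        for page in range(math.ceil(number_of_listings / listings_per_page))]
print(len(urls), urls[0])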
Python, with 10 results per page:
import requests, re, hjson, math
import pandas as pd
from bs4 import BeautifulSoup as bs

p = re.compile(r"jobmap\[\d+\]= ({.*?})")
p1 = re.compile(r"var searchUID = '(.*?)';")
counter = 0
final = {}

with requests.Session() as s:
    r = s.get('https://www.indeed.com/q-software-developer-l-San-Francisco-jobs.html#')
    soup = bs(r.content, 'lxml')
    tk = p1.findall(r.text)[0]
    listings_per_page = 10
    number_of_listings = int(soup.select_one('[name=description]')['content'].split(' ')[0].replace(',', ''))
    #number_of_pages = math.ceil(number_of_listings/listings_per_page)
    number_of_pages = math.ceil(6175/listings_per_page)  # manually calculated

    for page in range(1, number_of_pages + 1):
        if page > 1:
            r = s.get('https://www.indeed.com/jobs?q=software+developer&l=San+Francisco&start={}'.format(10*page-1))
            soup = bs(r.content, 'lxml')
            tk = p1.findall(r.text)[0]
        for item in p.findall(r.text):
            data = hjson.loads(item)
            jk = data['jk']
            row = {'title': data['title'],
                   'company': data['cmp'],
                   'url': f'https://www.indeed.com/viewjob?jk={jk}&tk={tk}&from=serp&vjs=3'}
            final[counter] = row
            counter += 1

df = pd.DataFrame(final)
output_df = df.T
output_df.to_csv(r'C:\Users\User\Desktop\Data.csv', sep=',', encoding='utf-8-sig', index=False)
If you want to use selenium to get the correct initial listings count:
import requests, re, hjson, math
import pandas as pd
from bs4 import BeautifulSoup as bs
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

options = Options()
options.add_argument("--headless")
d = webdriver.Chrome(r'C:\Users\HarrisQ\Documents\chromedriver.exe', options=options)
d.get('https://www.indeed.com/q-software-developer-l-San-Francisco-jobs.html#')
number_of_listings = int(d.find_element_by_css_selector('[name=description]').get_attribute('content').split(' ')[0].replace(',', ''))
d.quit()

p = re.compile(r"jobmap\[\d+\]= ({.*?})")
p1 = re.compile(r"var searchUID = '(.*?)';")
counter = 0
final = {}

with requests.Session() as s:
    r = s.get('https://www.indeed.com/q-software-developer-l-San-Francisco-jobs.html#')
    soup = bs(r.content, 'lxml')
    tk = p1.findall(r.text)[0]
    listings_per_page = 10
    number_of_pages = math.ceil(6175/listings_per_page)  # manually calculated

    for page in range(1, number_of_pages + 1):
        if page > 1:
            r = s.get('https://www.indeed.com/jobs?q=software+developer&l=San+Francisco&start={}'.format(10*page-1))
            soup = bs(r.content, 'lxml')
            tk = p1.findall(r.text)[0]
        for item in p.findall(r.text):
            data = hjson.loads(item)
            jk = data['jk']
            row = {'title': data['title'],
                   'company': data['cmp'],
                   'url': f'https://www.indeed.com/viewjob?jk={jk}&tk={tk}&from=serp&vjs=3'}
            final[counter] = row
            counter += 1

df = pd.DataFrame(final)
output_df = df.T
output_df.to_csv(r'C:\Users\User\Desktop\Data.csv', sep=',', encoding='utf-8-sig', index=False)
