Python 3, bs4, webcrawler; error connecting to website - python

I am trying to build a web crawler for a specific website, but for some reason it won't connect to the site.
I get an error (one I raise myself) saying it can't connect.
Using Selenium to call up the website, I can see that it doesn't connect.
As a newbie I am probably making a stupid mistake, but I can't figure out what it is.
I hope you are willing to help me.
import csv
import requests
import datetime
from time import sleep, time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
browser = webdriver.Chrome('C:/Users/907133/Pythonstuff/chromedriver')
browser.set_window_position(0,0)
captcha = input('Press Enter after bypassing Captcha')
# def get_driver():
#     driver = webdriver.Chrome()
#     return driver

def get_driver():
    # initialize options
    options = webdriver.ChromeOptions()
    # pass in headless argument to options
    options.add_argument('--headless')
    # initialize driver
    driver = webdriver.Chrome(chrome_options=options)
    return driver

def connect_to_base(browser, page_number):
    base_url = f'https://www.jaap.nl/koophuizen/noord+holland/groot-amsterdam/amsterdam/p{page_number}'
    html = None
    links = None
    connection_attempts = 0
    while connection_attempts < 3:
        try:
            browser.get(base_url)
            # wait for an element with class 'result-content' to load
            # before returning True
            WebDriverWait(browser, 10).until(
                EC.presence_of_element_located((By.CLASS_NAME, 'result-content')))
            return True
        except Exception as ex:
            connection_attempts += 1
            print(f'Error connecting to {base_url}')
            print(f'Attempt #{connection_attempts}')
    return False

def parse_html(html):
    soup = BeautifulSoup(html, 'html.parser')
    inside = soup.find_all('a', {'class': 'property-inner'}, {'href'})
    # Make empty lists with header lines
    output_list = []
    listing = 1
    for items in inside:
        href = items.get('href')
        url1 = href.format(page)
        # dismiss the cookie banner if it is shown (XPath uses @class, not #class)
        if len(browser.find_elements_by_xpath("//a[@class='CookiesOK']")) > 0:
            browser.find_element_by_xpath("//a[@class='CookiesOK']").click()
        connection_attempts = 0
        while connection_attempts < 3:
            try:
                browser.get(url1)
                WebDriverWait(browser, 5).until(
                    EC.presence_of_element_located((By.CLASS_NAME, 'detail-address')))
                return True
            except Exception as ex:
                connection_attempts += 1
                print(f'Error connecting to {base_url}')
                print(f'Attempt #{connection_attempts}')
        details = BeautifulSoup(browser.page_source, 'html.parser')
        adres = details.find_all('div', {'class': 'detail-address'})
        for adresinfo in adres:
            try:
                adres = adres[0].get_text(separator=',', strip=True)
            except IndexError:
                adres = "Unknown"
        kenmerken = details.find_all('div', {'class': 'detail-tab-content kenmerken'})
        try:
            tr_kenmerken = ','.join([td.text.strip() for td in kenmerken[0].select('td.value')])
        except IndexError:
            tr_kenmerken = 'Unknown'
        waarde = details.find_all('div', {'class': 'detail-tab-content woningwaarde'})
        try:
            tr_waarde = ','.join([td.text.strip() for td in waarde[0].select('td.value')])
        except IndexError:
            tr_waarde = 'Unknown'
        informatie = {
            'adres': adres,
            'kenmerken': tr_kenmerken,
            'waarde': tr_waarde,
            'url': href
        }
        output_list.append(informatie)
        listing += 1
    return output_list

def get_load_time(article_url):
    try:
        # set headers
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
        # make get request to article_url
        response = requests.get(
            article_url, headers=headers, stream=True, timeout=3.000)
        # get page load time
        load_time = response.elapsed.total_seconds()
    except Exception as ex:
        load_time = 'Loading Error'
    return load_time

def write_to_file(output_list, filename):
    for row in output_list:
        with open(filename, 'a') as csvfile:
            # fieldnames must match the keys used in parse_html ('url', not 'link')
            fieldnames = ['adres', 'kenmerken', 'waarde', 'url']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow(row)

def run_process(page_number, filename, browser):
    if connect_to_base(browser, page_number):
        sleep(2)
        html = browser.page_source
        output_list = parse_html(html)
        write_to_file(output_list, filename)
    else:
        print('Error connecting to jaap')

if __name__ == '__main__':
    # set variables
    start_time = time()
    current_page = 1
    output_timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    output_filename = f'output_{output_timestamp}.csv'
    browser = get_driver()
    # scrape and crawl
    while current_page <= 3:
        print(f'Scraping page #{current_page}...')
        run_process(current_page, output_filename, browser)
        current_page = current_page + 1
    # exit
    browser.quit()
    end_time = time()
    elapsed_time = end_time - start_time
    print(f'Elapsed run time: {elapsed_time} seconds')

I see you fixed EC.presence_of_element_located((By.ID, {'class':'result-content'})) to be EC.presence_of_element_located((By.CLASS_NAME, 'result-content')).
Next, depending on where the browser is opened, you may also run into having to bypass/click a JavaScript prompt that asks you to accept cookies.
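If that banner does appear, a small helper along these lines could dismiss it after each page load. This is only a sketch: the 'CookiesOK' class is taken from the question's own XPath, so the selector and timeout may need adjusting.

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def dismiss_cookie_banner(browser, timeout=5):
    # click the cookie-accept link if it appears; do nothing otherwise
    try:
        WebDriverWait(browser, timeout).until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, 'a.CookiesOK'))).click()
    except Exception:
        pass  # no banner was shown within the timeout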
That said, all of that crawling code seems like an awful lot of work considering the data is stored as JSON inside a script tag in the HTML. Why not simply use requests, pull out the JSON, convert it to a dataframe, and then write it to csv?
import requests
import datetime
from time import sleep, time
from bs4 import BeautifulSoup
import json
import pandas as pd
from pandas.io.json import json_normalize
def run_process(page_number):
    base_url = f'https://www.jaap.nl/koophuizen/noord+holland/groot-amsterdam/amsterdam/p{page_number}'
    response = requests.get(base_url)
    soup = BeautifulSoup(response.text, 'html.parser')
    jsonStr = soup.find('script', {'id': 'page-data'}).text
    jsonData = json.loads(jsonStr)
    df = json_normalize(jsonData['properties'])
    return df

if __name__ == '__main__':
    # set variables
    start_time = time()
    current_page = 1
    output_timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    output_filename = f'C:/test/output_{output_timestamp}.csv'
    final_df = pd.DataFrame()
    while current_page <= 3:
        print(f'Scraping page #{current_page}...')
        df = run_process(current_page)
        final_df = final_df.append(df, sort=True).reset_index(drop=True)
        current_page = current_page + 1
    final_df.to_csv(output_filename, index=False)
    end_time = time()
    elapsed_time = end_time - start_time
    print(f'Elapsed run time: {elapsed_time} seconds')
Output:
Scraping page #1...
Scraping page #2...
Scraping page #3...
Elapsed run time: 7.441420555114746 seconds
and the csv file looks like:
app area detailsUrl expired houseTypeValue id latLng latLng.latitude latLng.longitude location.city location.street location.zipcode lotSize market numberOfRooms openHouseDate openHouseTimes openhouse photo price priceToShow showoffColor showoffCustomText showoffPhotoText spotlight status veiling
0 False 165 /te-koop/noord+holland/groot-amsterdam/amsterd... False Herenhuis 6899666 NaN 52.368420 4.833631 AMSTERDAM Hof van Versailles 61 1064NX 216 sale 4 None None False 10014EAAF8B8883668593EFAC9E5FF1C 595000.0 595000.0 None None None False Sale False
1 True 211 /te-koop/noord+holland/groot-amsterdam/amsterd... False Appartement 10585731 NaN 52.327550 4.889076 AMSTERDAM Beysterveld 35 1083KA Onbekend sale 4 None None False E4F9E5BC7BC90B5B92C7BD8D48B7A677 925000.0 925000.0 None None None False Sale False
2 True 111 /te-koop/noord+holland/groot-amsterdam/amsterd... False Dubbele bovenwoning 11731386 NaN 52.341890 4.896053 AMSTERDAM Uiterwaardenstraat 320 2 1079DC Onbekend sale 5 None None False AB9F45B2CD4AD7879C5A80F18092F9D4 750000.0 750000.0 None None None False SoldConditionally False
3 False 269 /te-koop/noord+holland/groot-amsterdam/amsterd... False Herenhuis 11840681 NaN 52.358266 4.875508 AMSTERDAM Korte van Eeghenstraat 4 1071ER 107 sale 9 None None False A3DF2B1D426B5E4D501503C5D0E66966 3100000.0 3100000.0 None None None False Sale False
4 False 100 /te-koop/noord+holland/groot-amsterdam/amsterd... False Tussenwoning 12152943 NaN 52.421245 4.899478 AMSTERDAM Pieter A v Heijningestraat 9 1035SV 83 sale 5 None None False 55C6F589523FA553D67A709776DD70DD 399000.0 399000.0 None None None False Sale False
5 True 111 /te-koop/noord+holland/groot-amsterdam/amsterd... False Bovenwoning 15796874 NaN NaN NaN AMSTERDAM Eerste Amstelvlietpad 20 1096GB Onbekend sale 3 None None False AE822B627ED096310B9ECBE7756340C8 1200000.0 1200000.0 None None None False Sale False
6 True 76 /te-koop/noord+holland/groot-amsterdam/amsterd... False Benedenwoning 10580650 NaN 52.346010 4.888799 AMSTERDAM Grevelingenstraat 18 HS 1078KP Onbekend sale 2 None None False 6FD1011D917E776DCF4DA836B5FFEE3E 550000.0 550000.0 None None None False SoldConditionally False
7 False 298 /te-koop/noord+holland/groot-amsterdam/amsterd... False Villa 9623182 NaN 52.330610 4.862902 AMSTERDAM Cannenburg 51 1081GW 651 sale 7 None None False 15FA170B99D4E2DEA03B6FC27E3B5B74 2495000.0 2495000.0 None None None False Sale False
8 False 270 /te-koop/noord+holland/groot-amsterdam/amsterd... False Herenhuis 15791215 NaN 52.347780 5.004530 AMSTERDAM Nico Jessekade 189 1087MR 200 sale 9 None None False 6EA5C0CDA0475DFC88A3A918A6B2909A 1549000.0 1549000.0 None None None False SoldConditionally False
9 False 201 /te-koop/noord+holland/groot-amsterdam/amsterd... False Villa 9617942 NaN 52.377391 4.764554 AMSTERDAM Osdorperweg 803 1067SW 1348 sale 6 None None False 4680429D99EC5AC47C950D57A77DF1EB 950000.0 950000.0 None None None False Sale False
UPDATE:
import requests
import datetime
from time import sleep, time
from bs4 import BeautifulSoup
import json
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
def run_process(page_number):
    # page_number = 1  # left over from testing; keeping it would pin every request to page 1
    base_url = f'https://www.jaap.nl/koophuizen/noord+holland/groot-amsterdam/amsterdam/p{page_number}'
    response = requests.get(base_url)
    soup = BeautifulSoup(response.text, 'html.parser')
    jsonStr = soup.find('script', {'id': 'page-data'}).text
    jsonData = json.loads(jsonStr)
    df = json_normalize(jsonData['properties'])
    root_URL = 'https://jaap.nl'
    df['detailsUrl'] = root_URL + df['detailsUrl']
    allPropDetails = pd.DataFrame()
    for idx, row in df.iterrows():
        propDetails = pd.DataFrame(index=[0])
        w = 1
        detailLink = row['detailsUrl']
        print('Scraping: %s' % (row['location.street']))
        dfs = pd.read_html(detailLink)
        for each in dfs:
            #each = dfs[8]
            w = 1
            if each.isnull().all().all():
                continue
            each = each.dropna(axis=0, how='all')
            specialCase = False
            for col in list(each.columns):
                if each[col].dtypes == 'object':
                    if each[col].str.contains('Voorziening').any():
                        specialCase = True
                        break
            if specialCase == True:
                df_obj = each.select_dtypes(['object'])
                each[df_obj.columns] = df_obj.apply(lambda x: x.str.rstrip('. '))
                cols1 = list(each.iloc[2:, 0])
                each = each.iloc[2:, :]
                each[1] = each[1] + '---' + each[2]
                each = each.iloc[:, -2]
                each.index = cols1
                each = each.to_frame().T
                propRow = each
                propRow.index = [0]
                temp_df = pd.DataFrame(index=[0])
                for col in propRow.columns:
                    temp_df = temp_df.merge(propRow[col].str.split('---', expand=True).rename(columns={0: col, 1: col + '.distance'}), left_index=True, right_index=True)
                propRow = temp_df
            else:
                df_obj = each.select_dtypes(['object'])
                each[df_obj.columns] = df_obj.apply(lambda x: x.str.rstrip('. '))
                temp_df = each.T
                cols = [temp_df.index[0] + '_' + colName for colName in list(temp_df.iloc[0, :])]
                propRow = temp_df.iloc[-1, :]
                propRow.index = cols
                propRow = propRow.to_frame().T
                propRow.index = [0]
            propDetails = propDetails.merge(propRow, left_index=True, right_index=True)
        propDetails.index = [idx]
        allPropDetails = allPropDetails.append(propDetails, sort=True)
    df = df.merge(allPropDetails, how='left', left_index=True, right_index=True)
    return df

if __name__ == '__main__':
    # set variables
    start_time = time()
    current_page = 1
    output_timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    output_filename = f'C:/test/output_{output_timestamp}.csv'
    final_df = pd.DataFrame()
    while current_page <= 3:
        print(f'Scraping page #{current_page}...')
        df = run_process(current_page)
        final_df = final_df.append(df, sort=True).reset_index(drop=True)
        current_page = current_page + 1
    final_df.to_csv(output_filename, index=False)
    end_time = time()
    elapsed_time = end_time - start_time
    print(f'Elapsed run time: {elapsed_time} seconds')

Related

unable to implement explicit wait in the code

I am trying to apply an explicit wait so that the page loads before I extract the data. I have tried this solution; however, I don't know where to insert it in the code.
browser.implicitly_wait does not seem to work and I don't know why.
Code:
import os
import threading
from math import nan
from multiprocessing.pool import ThreadPool
import pandas as pd
from bs4 import BeautifulSoup as bs
from selenium import webdriver
class Driver:
    def __init__(self):
        options = webdriver.ChromeOptions()
        options.add_argument("--headless")
        # Un-comment next line to suppress logging:
        options.add_experimental_option('excludeSwitches', ['enable-logging'])
        self.driver = webdriver.Chrome(options=options)

    def __del__(self):
        self.driver.quit()  # clean up driver when we are cleaned up
        # print('The driver has been "quitted".')

threadLocal = threading.local()

def create_driver():
    the_driver = getattr(threadLocal, 'the_driver', None)
    if the_driver is None:
        the_driver = Driver()
        setattr(threadLocal, 'the_driver', the_driver)
    return the_driver.driver

class GameData:
    def __init__(self):
        self.date = []
        self.time = []
        self.game = []
        self.score = []
        self.home_odds = []
        self.draw_odds = []
        self.away_odds = []
        self.country = []
        self.league = []

def generate_matches(pgSoup, defaultVal=None):
    evtSel = {
        'time': 'p.whitespace-nowrap',
        'game': 'a div:has(>a[title])',
        'score': 'a:has(a[title])+div.hidden',
        'home_odds': 'a:has(a[title])~div:not(.hidden)',
        'draw_odds': 'a:has(a[title])~div:not(.hidden)+div:nth-last-of-type(3)',
        'away_odds': 'a:has(a[title])~div:nth-last-of-type(2)',
    }
    events, current_group = [], {}
    pgDate = pgSoup.select_one('h1.title[id="next-matches-h1"]')
    if pgDate: pgDate = pgDate.get_text().split(',', 1)[-1].strip()
    for evt in pgSoup.select('div[set]>div:last-child'):
        if evt.parent.select(f':scope>div:first-child+div+div'):
            cgVals = [v.get_text(' ').strip() if v else defaultVal for v in [
                evt.parent.select_one(s) for s in
                [':scope>div:first-child+div>div:first-child',
                 ':scope>div:first-child>a:nth-of-type(2):nth-last-of-type(2)',
                 ':scope>div:first-child>a:nth-of-type(3):last-of-type']]]
            current_group = dict(zip(['date', 'country', 'league'], cgVals))
            if pgDate: current_group['date'] = pgDate
        evtRow = {'date': current_group.get('date', defaultVal)}
        for k, v in evtSel.items():
            v = evt.select_one(v).get_text(' ') if evt.select_one(v) else defaultVal
            evtRow[k] = ' '.join(v.split()) if isinstance(v, str) else v
        evtTeams = evt.select('a div>a[title]')
        evtRow['game'] = ' – '.join(a['title'] for a in evtTeams)
        evtRow['country'] = current_group.get('country', defaultVal)
        evtRow['league'] = current_group.get('league', defaultVal)
        events.append(evtRow)
    return events

def parse_data(url, return_urls=False):
    browser = create_driver()
    browser.get(url)
    browser.implicitly_wait(30)  # I could not get an explicit wait to work here; implicitly_wait does not seem to work at all.
    soup = bs(browser.page_source, "lxml")
    game_data = GameData()
    game_keys = [a for a, av in game_data.__dict__.items() if isinstance(av, list)]
    for row in generate_matches(soup, defaultVal=nan):
        for k in game_keys: getattr(game_data, k).append(row.get(k, nan))
    if return_urls:
        a_cont = soup.find('div', {'class': 'tabs'})
        if a_cont is None:
            a_tags = []
        else:
            a_tags = a_cont.find_all('a', {'class': 'h-8', 'href': True})
        urls = [
            'https://www.oddsportal.com' + a_tag['href'] for a_tag in a_tags
            if not a_tag['href'].startswith('#')  # sections in current page
            and 'active-item-calendar' not in a_tag['class']  # current page
        ]
        print(pd.DataFrame(urls, columns=['urls']))
        return game_data, urls
    return game_data

if __name__ == '__main__':
    games = None
    pool = ThreadPool(5)
    # Get today's data and the Urls for the other days:
    url_today = 'https://www.oddsportal.com/matches/soccer'
    game_data_today, urls = pool.apply(parse_data, args=(url_today, True))
    game_data_results = pool.imap(parse_data, urls)
    ############################ BUILD DATAFRAME ############################
    game_n, added_todayGame = 0, False
    for game_data in game_data_results:
        try:
            game_n += 1
            gd_df = pd.DataFrame(game_data.__dict__)
            games = gd_df if games is None else pd.concat([games, gd_df])
            if not added_todayGame:
                game_n += 1
                gdt_df = pd.DataFrame(game_data_today.__dict__)
                games, added_todayGame = pd.concat([games, gdt_df]), True
        except Exception as e:
            print(f'Error tabulating game_data_df#{game_n}:\n{repr(e)}')
    ##########################################################################
    print('!?NO GAMES?!' if games is None else games)  ## print(games)
    # ensure all the drivers are "quitted":
    del threadLocal  # a little extra insurance
    import gc
    gc.collect()
Where would I insert an explicit wait so that the page has fully loaded before I extract the dataframe games?
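One possibility, sketched under the assumption that the event rows generate_matches scrapes ('div[set]>div:last-child') are what needs to finish loading, is to swap the browser.implicitly_wait(30) call in parse_data for an explicit wait on that selector:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_for_matches(browser, timeout=30):
    # block until at least one event row is present, then hand back the page source
    WebDriverWait(browser, timeout).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, 'div[set] > div:last-child')))
    return browser.page_source

In parse_data, soup = bs(wait_for_matches(browser), "lxml") would then take the place of the implicitly_wait line; the 30-second timeout is simply carried over from the original call.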

Iterate over URLS for Webscraping using BeautifulSoup

This is my code to scrape odds from www.oddsportal.com.
import pandas as pd
from bs4 import BeautifulSoup as bs
from selenium import webdriver
import threading
from multiprocessing.pool import ThreadPool
import os
import re
class Driver:
    def __init__(self):
        options = webdriver.ChromeOptions()
        options.add_argument("--headless")
        # Un-comment next line to suppress logging:
        options.add_experimental_option('excludeSwitches', ['enable-logging'])
        self.driver = webdriver.Chrome(options=options)

    def __del__(self):
        self.driver.quit()  # clean up driver when we are cleaned up
        # print('The driver has been "quitted".')

threadLocal = threading.local()

def create_driver():
    the_driver = getattr(threadLocal, 'the_driver', None)
    if the_driver is None:
        the_driver = Driver()
        setattr(threadLocal, 'the_driver', the_driver)
    return the_driver.driver

class GameData:
    def __init__(self):
        self.date = []
        self.time = []
        self.game = []
        self.score = []
        self.home_odds = []
        self.draw_odds = []
        self.away_odds = []
        self.country = []
        self.league = []

def generate_matches(table):
    global country, league
    tr_tags = table.findAll('tr')
    for tr_tag in tr_tags:
        if 'class' not in tr_tag.attrs:
            continue
        tr_class = tr_tag['class']
        if 'dark' in tr_class:
            th_tag = tr_tag.find('th', {'class': 'first2 tl'})
            a_tags = th_tag.findAll('a')
            country = a_tags[0].text
            league = a_tags[1].text
        elif 'deactivate' in tr_class:
            td_tags = tr_tag.findAll('td')
            yield td_tags[0].text, td_tags[1].text, td_tags[2].text, td_tags[3].text, \
                  td_tags[4].text, td_tags[5].text, country, league

def parse_data(url):
    browser = create_driver()
    browser.get(url)
    soup = bs(browser.page_source, "lxml")
    div = soup.find('div', {'id': 'col-content'})
    table = div.find('table', {'class': 'table-main'})
    h1 = soup.find('h1').text
    m = re.search(r'\d+ \w+ \d{4}$', h1)
    game_date = m[0]
    game_data = GameData()
    for row in generate_matches(table):
        game_data.date.append(game_date)
        game_data.time.append(row[0])
        game_data.game.append(row[1])
        game_data.score.append(row[2])
        game_data.home_odds.append(row[3])
        game_data.draw_odds.append(row[4])
        game_data.away_odds.append(row[5])
        game_data.country.append(row[6])
        game_data.league.append(row[7])
    return game_data

# URLs go here
urls = {
    "https://www.oddsportal.com/matches/soccer/20210903/",
}

if __name__ == '__main__':
    results = None
    # To limit the number of browsers we will use
    # (set to a large number if you don't want a limit):
    MAX_BROWSERS = 5
    pool = ThreadPool(min(MAX_BROWSERS, len(urls)))
    for game_data in pool.imap(parse_data, urls):
        result = pd.DataFrame(game_data.__dict__)
        if results is None:
            results = result
        else:
            results = results.append(result, ignore_index=True)
    print(results)
    # print(results.head())
    # ensure all the drivers are "quitted":
    del threadLocal
    import gc
    gc.collect()  # a little extra insurance
Currently, the code only gets data for one URL. I would like to integrate the part below into my code so that it iterates over all the links for "Yesterday, today, tomorrow and the next 5 days".
This part of another script gets those URLs:
browser = webdriver.Chrome()
def get_urls(browser, landing_page):
    browser.get(landing_page)
    urls = [i.get_attribute('href') for i in
            browser.find_elements_by_css_selector(
                '.next-games-date > a:nth-child(1), .next-games-date > a:nth-child(n+3)')]
    return urls
....
if __name__ == '__main__':
    start_url = "https://www.oddsportal.com/matches/soccer/"
    urls = []
    browser = webdriver.Chrome()
    results = None
    urls = get_urls(browser, start_url)
    urls.insert(0, start_url)
    for number, url in enumerate(urls):
        if number > 0:
            browser.get(url)
        html = browser.page_source
        game_data = parse_data(html)
        if game_data is None:
            continue
        result = pd.DataFrame(game_data.__dict__)
How do I integrate getting these URLs into my code and iterate over them so that I end up with one single dataframe?
I had to make some adjustments to the function generate_matches, since the returning of certain class names was not reliable. I also removed the global statements from that function, which I never should have had.
import pandas as pd
from bs4 import BeautifulSoup as bs
from selenium import webdriver
import threading
from multiprocessing.pool import ThreadPool
import os
import re
class Driver:
    def __init__(self):
        options = webdriver.ChromeOptions()
        options.add_argument("--headless")
        # Un-comment next line to suppress logging:
        options.add_experimental_option('excludeSwitches', ['enable-logging'])
        self.driver = webdriver.Chrome(options=options)

    def __del__(self):
        self.driver.quit()  # clean up driver when we are cleaned up
        # print('The driver has been "quitted".')

threadLocal = threading.local()

def create_driver():
    the_driver = getattr(threadLocal, 'the_driver', None)
    if the_driver is None:
        the_driver = Driver()
        setattr(threadLocal, 'the_driver', the_driver)
    return the_driver.driver

class GameData:
    def __init__(self):
        self.date = []
        self.time = []
        self.game = []
        self.score = []
        self.home_odds = []
        self.draw_odds = []
        self.away_odds = []
        self.country = []
        self.league = []

def generate_matches(table):
    tr_tags = table.findAll('tr')
    for tr_tag in tr_tags:
        if 'class' in tr_tag.attrs and 'dark' in tr_tag['class']:
            th_tag = tr_tag.find('th', {'class': 'first2 tl'})
            a_tags = th_tag.findAll('a')
            country = a_tags[0].text
            league = a_tags[1].text
        else:
            td_tags = tr_tag.findAll('td')
            yield td_tags[0].text, td_tags[1].text, td_tags[2].text, td_tags[3].text, \
                  td_tags[4].text, td_tags[5].text, country, league

def parse_data(url, return_urls=False):
    browser = create_driver()
    browser.get(url)
    soup = bs(browser.page_source, "lxml")
    div = soup.find('div', {'id': 'col-content'})
    table = div.find('table', {'class': 'table-main'})
    h1 = soup.find('h1').text
    m = re.search(r'\d+ \w+ \d{4}$', h1)
    game_date = m[0]
    game_data = GameData()
    for row in generate_matches(table):
        game_data.date.append(game_date)
        game_data.time.append(row[0])
        game_data.game.append(row[1])
        game_data.score.append(row[2])
        game_data.home_odds.append(row[3])
        game_data.draw_odds.append(row[4])
        game_data.away_odds.append(row[5])
        game_data.country.append(row[6])
        game_data.league.append(row[7])
    if return_urls:
        span = soup.find('span', {'class': 'next-games-date'})
        a_tags = span.findAll('a')
        urls = ['https://www.oddsportal.com' + a_tag['href'] for a_tag in a_tags]
        return game_data, urls
    return game_data

if __name__ == '__main__':
    results = None
    pool = ThreadPool(5)  # We will be getting, however, 7 URLs
    # Get today's data and the Urls for the other days:
    game_data_today, urls = pool.apply(parse_data, args=('https://www.oddsportal.com/matches/soccer', True))
    urls.pop(1)  # Remove url for today: We already have the data for that
    game_data_results = pool.imap(parse_data, urls)
    for i in range(8):
        game_data = game_data_today if i == 1 else next(game_data_results)
        result = pd.DataFrame(game_data.__dict__)
        if results is None:
            results = result
        else:
            results = results.append(result, ignore_index=True)
    print(results)
    # print(results.head())
    # ensure all the drivers are "quitted":
    del threadLocal
    import gc
    gc.collect()  # a little extra insurance
Prints:
date time game score home_odds draw_odds away_odds country league
0 07 Sep 2021 00:00 Pachuca W - Monterrey W  0:1 +219 +280 -106  Mexico Liga MX Women
1 07 Sep 2021 01:05 Millonarios - Patriotas 1:0 -303 +380 +807  Colombia Primera A
2 07 Sep 2021 02:00 Club Tijuana W - Club Leon W  4:0 -149 +293 +311  Mexico Liga MX Women
3 07 Sep 2021 08:30 Suzhou Dongwu - Nanjing City 0:0 +165 +190 +177  China Jia League
4 07 Sep 2021 08:45 Kuching City FC - Sarawak Utd. 1:0 +309 +271 -143  Malaysia Premier League
... ... ... ... ... ... ... ... ... ...
1305 14 Sep 2021 21:45 Central Cordoba - Atl. Tucuman +192 +217 +146 13  Argentina Liga Profesional
1306 14 Sep 2021 22:00  Colo Colo - Everton -141 +249 +395 11  Chile Primera Division
1307 14 Sep 2021 23:30  Columbus Crew - New York Red Bulls - - - 1  USA MLS
1308 14 Sep 2021 23:30  New York City - FC Dallas - - - 1  USA MLS
1309 14 Sep 2021 23:30  Toronto FC - Inter Miami - - - 1  USA MLS
[1310 rows x 9 columns]
I'd suggest you integrate this method when iterating over the URLs.
Code snippet:
#assuming you have a list of start_urls
start_urls=['https://www.oddsportal.com/matches/soccer/20210903/']
urls=[]
#get links for Yesterday, today, tomorrow and the next 5 days
for start_url in start_urls:
    driver.get(start_url)
    html_source = driver.page_source
    soup = BeautifulSoup(html_source, 'lxml')
    dates = soup.find('span', class_='next-games-date')
    links = dates.find_all('a')
    for link in links:
        urls.append('https://www.oddsportal.com' + link['href'])
# get data from each link
for url in urls:
    driver.get(url)
    # function call to parse data
    # function call to append data

IndexError: list index out of range while webscraping

This code is giving an IndexError: list index out of range
import pandas as pd
from selenium import webdriver
from datetime import datetime
from bs4 import BeautifulSoup as bs
from math import nan
browser = webdriver.Chrome()
class GameData:
    def __init__(self):
        self.score = []
        self.date = []
        self.time = []
        self.country = []
        self.league = []
        self.game = []
        self.home_odds = []
        self.draw_odds = []
        self.away_odds = []

    def append(self, score):
        pass

def get_urls(browser, landing_page):
    browser.get(landing_page)
    urls = [i.get_attribute('href') for i in
            browser.find_elements_by_css_selector(
                '.next-games-date > a:nth-child(1), .next-games-date > a:nth-child(n+3)')]
    return urls

def parse_data(html):
    global league
    df = pd.read_html(html, header=0)[0]
    # print(len(df.index))
    # print(df.columns)
    html = browser.page_source
    soup = bs(html, "lxml")
    # print(len(soup.select('#table-matches tr')))
    scores = [i.select_one('.table-score').text if i.select_one('.table-score') is not None else nan for i in
              soup.select('#table-matches tr:nth-of-type(n+2)')]
    cont = soup.find('div', {'id': 'wrap'})
    content = cont.find('div', {'id': 'col-content'})
    content = content.find('table', {'class': 'table-main'}, {'id': 'table-matches'})
    main = content.find('th', {'class': 'first2 tl'})
    if main is None:
        return None
    count = main.findAll('a')
    country = count[0].text
    game_data = GameData()
    game_date = datetime.strptime(soup.select_one('.bold')['href'].split('/')[-2], '%Y%m%d').date()
    leagues = [i.text for i in soup.select('.first2 > a:last-child')]
    n = 0
    for number, row in enumerate(df.itertuples()):
        if n == 0 or '»' in row[1]:
            league = leagues[n]
            n += 1
        if not isinstance(row[1], str):
            continue
        elif ':' not in row[1]:
            country = row[1].split('»')[0]
            continue
        game_time = row[1]
        print(len(scores))
        print(scores[number])
        game_data.date.append(game_date)
        game_data.time.append(game_time)
        game_data.country.append(country)
        game_data.league.append(league)
        game_data.game.append(row[2])
        game_data.score.append(scores[number])
        game_data.home_odds.append(row[4])
        game_data.draw_odds.append(row[5])
        game_data.away_odds.append(row[6])
    return game_data

if __name__ == '__main__':
    start_url = "https://www.oddsportal.com/matches/soccer/"
    urls = []
    browser = webdriver.Chrome()
    results = None
    urls = get_urls(browser, start_url)
    urls.insert(0, start_url)
    for number, url in enumerate(urls):
        if number > 0:
            browser.get(url)
        html = browser.page_source
        game_data = parse_data(html)
        if game_data is None:
            continue
        result = pd.DataFrame(game_data.__dict__)
        if results is None:
            results = result
        else:
            results = results.append(result, ignore_index=True)
When I try to find out more about the error by printing:
print(len(scores))
print(scores[number])
I get:
346
2:3
346
0:2
346
1:3
346
1:1
......
Traceback (most recent call last):
File "C:\Users\harsh\AppData\Roaming\JetBrains\PyCharmCE2021.2\scratches\scratch_10.py", line 112, in <module>
game_data = parse_data(html)
File "C:\Users\harsh\AppData\Roaming\JetBrains\PyCharmCE2021.2\scratches\scratch_10.py", line 84, in parse_data
print(scores[number])
IndexError: list index out of range
while, on a good day, print(scores[number]) gives:
2:3
0:2
1:1
How can I resolve this?
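As a hedged sketch (it bounds the crash rather than fixing the underlying misalignment between the dataframe rows and the scraped scores), the lookup inside parse_data could be guarded before appending:

# guard the index so rows beyond the scraped scores fall back to nan instead of raising
score = scores[number] if number < len(scores) else nan
game_data.score.append(score)

This assumes nan (already imported from math in the question's code) is an acceptable placeholder for a missing score.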

Python web scraper not getting certain values

I'm having trouble with my web scraper not getting the "Odds" values, and I'm not sure what is wrong. For each piece of information, I use a try/except to check whether the element is available, yet the Odds values in particular never come through. Thanks for the help.
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
url = 'https://www.ncaagamesim.com/college-basketball-predictions.asp'
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
table = soup.find('table')
# Get column names
headers = table.find_all('th')
cols = [x.text for x in headers]
# Get all rows in table body
table_rows = table.find_all('tr')
rows = []
# Grab the text of each td, and put into a rows list
for each in table_rows[1:]:
    odd_avail = True
    data = each.find_all('td')
    time = data[0].text.strip()

    # Get matchup and odds
    try:
        matchup, odds = data[1].text.strip().split('\xa0')
        odd_margin = float(odds.split('by')[-1].strip())
    except:
        matchup = data[1].text.strip()
        odd_margin = '-'
        odd_avail = False

    # Get favored team
    try:
        odd_team_win = data[1].find_all('img')[-1]['title']
    except:
        odd_team_win = '-'
        odd_avail = False

    # Get simulation winner
    try:
        sim_team_win = data[2].find('img')['title']
    except:
        sim_team_win = '-'
        odd_avail = False

    awayTeam = matchup.split('#')[0].strip()
    homeTeam = matchup.split('#')[1].strip()

    # Get simulation margin
    try:
        sim_margin = float(re.findall("\d+\.\d+", data[2].text)[-1])
    except:
        sim_margin = '-'
        odd_avail = False

    # If all variables available, determine odds, simulation margin points, and optimal bet
    if odd_avail == True:
        if odd_team_win == sim_team_win:
            diff = abs(sim_margin - odd_margin)
            if sim_margin > odd_margin:
                bet = odd_team_win
            else:
                if odd_team_win == homeTeam:
                    bet = awayTeam
                else:
                    bet = homeTeam
        else:
            diff = odd_margin + sim_margin
            bet = sim_team_win
    else:
        diff = -1
        bet = '-'

    # Create table
    row = {cols[0]: time, 'Matchup': matchup, 'Odds Winner': odd_team_win, 'Odds': odd_margin,
           'Simulation Winner': sim_team_win, 'Simulation Margin': sim_margin, 'Diff': diff, 'Bet': bet}
    rows.append(row)

df = pd.DataFrame(rows)
df = df.sort_values(by=['Diff'], ascending=False)
print(df.to_string())
# df.to_csv('odds.csv', index=False)
When I run this code everything works perfectly and all the other values are collected, but all the Odds values in the table are '-'.
I added a few things to the code, to account for:
- the odds being "Even" (versus there being no odds)
- a team not having a logo, so the team name is still captured
As far as the odds not showing: check the csv file to see whether the values are there. If they are, it might just be a preference you need to change in PyCharm (it might simply be cutting off some of the string).
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
url = 'https://www.ncaagamesim.com/college-basketball-predictions.asp'
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
table = soup.find('table')
# Get column names
headers = table.find_all('th')
cols = [x.text for x in headers]
# Get all rows in table body
table_rows = table.find_all('tr')
rows = []
# Grab the text of each td, and put into a rows list
for each in table_rows[1:]:
    odd_avail = True
    data = each.find_all('td')
    time = data[0].text.strip()

    # Get matchup and odds
    try:
        matchup, odds = data[1].text.strip().split('\xa0')
        odd_margin = float(odds.split('by')[-1].strip())
    except:
        matchup = data[1].text.strip()
        if 'Even' in matchup:
            matchup, odds = data[1].text.strip().split('\xa0')
            odd_margin = 0
        else:
            odd_margin = '-'
            odd_avail = False

    awayTeam = matchup.split('#')[0].strip()
    homeTeam = matchup.split('#')[1].strip()

    # Get favored team
    try:
        odd_team_win = data[1].find_all('img')[-1]['title']
    except:
        odd_team_win = '-'
        odd_avail = False

    # Get simulation winner
    try:
        sim_team_win = data[2].find('img')['title']
    except:
        if 'wins' in data[2].text:
            sim_team_win = data[2].text.split('wins')[0].strip()
        else:
            sim_team_win = '-'
            odd_avail = False

    # Get simulation margin
    try:
        sim_margin = float(re.findall("\d+\.\d+", data[2].text)[-1])
    except:
        sim_margin = '-'
        odd_avail = False

    # If all variables available, determine odds and simulation margin points
    if odd_avail == True:
        if odd_team_win == sim_team_win:
            diff = abs(sim_margin - odd_margin)
        else:
            diff = odd_margin + sim_margin
    else:
        diff = '-'

    # Create table
    row = {cols[0]: time, 'Away Team': awayTeam, 'Home Team': homeTeam, 'Odds Winner': odd_team_win, 'Odds': odd_margin,
           'Simulation Winner': sim_team_win, 'Simulation Margin': sim_margin, 'Diff': diff}
    rows.append(row)

df = pd.DataFrame(rows)
print(df.to_string())
# df.to_csv('odds.csv', index=False)
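If the Odds column does show up in the csv but looks truncated in the console (an assumption about the cause rather than a confirmed diagnosis), widening pandas' display limits before printing can confirm the values are really there:

import pandas as pd

# show every column and full cell contents instead of truncating the printout
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)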

BeautifulSoup find_all() web scraping returns empty

When trying to scrape multiple pages of this website, I get no content in return. I usually check to make sure all the lists I'm creating are of equal length, but they are all coming back with len = 0.
I've used similar code to scrape other websites, so why does this code not work correctly?
Some solutions I've tried that haven't worked for my purposes: the requests.Session() solution suggested in this answer, and .json as suggested here.
import requests
from requests import get
from bs4 import BeautifulSoup
import pandas as pd
from time import sleep
from random import randint
from googletrans import Translator
translator = Translator()
rg = []
ctr_n = []
ctr = []
yr = []
mn = []
sub = []
cst_n = []
cst = []
mag = []
pty_n = []
pty = []
can = []
pev1 = []
vot1 = []
vv1 = []
ivv1 = []
to1 = []
cv1 = []
cvs1 = []
pv1 = []
pvs1 = []
pev2 = []
vot2 = []
vv2 = []
ivv2 = []
to2 = []
cv2 = []
cvs2 =[]
pv2 = []
pvs2 = []
seat = []
no_info = []
manual = []
START_PAGE = 1
END_PAGE = 42
for page in range(START_PAGE, END_PAGE + 1):
    page = requests.get("https://sejmsenat2019.pkw.gov.pl/sejmsenat2019/en/wyniki/sejm/okr/" + str(page))
    page.encoding = page.apparent_encoding
    if not page:
        pass
    else:
        soup = BeautifulSoup(page.text, 'html.parser')
        tbody = soup.find_all('table', class_='table table-borderd table-striped table-hover dataTable no-footer clickable right2 right4')
        sleep(randint(2, 10))
        for container in tbody:
            col1 = container.find_all('tr', {'data-id': '26079'})
            for info in col1:
                col_1 = info.find_all('td')
                for data in col_1:
                    party = data[0]
                    party_trans = translator.translate(party)
                    pty_n.append(party_trans)
                    pvotes = data[1]
                    pv1.append(pvotes)
                    pshare = data[2]
                    pvs1.append(pshare)
                    mandates = data[3]
                    seat.append(mandates)
            col2 = container.find_all('tr', {'data-id': '26075'})
            for info in col2:
                col_2 = info.find_all('td')
                for data in col_2:
                    party2 = data[0]
                    party_trans2 = translator.translate(party2)
                    pty_n.append(party_trans2)
                    pvotes2 = data[1]
                    pv1.append(pvotes2)
                    pshare2 = data[2]
                    pvs1.append(pshare2)
                    mandates2 = data[3]
                    seat.append(mandates2)
            col3 = container.find_all('tr', {'data-id': '26063'})
            for info in col3:
                col_3 = info.find_all('td')
                for data in col_3:
                    party3 = data[0].text
                    party_trans3 = translator.translate(party3)
                    pty_n.extend(party_trans3)
                    pvotes3 = data[1].text
                    pv1.extend(pvotes3)
                    pshare3 = data[2].text
                    pvs1.extend(pshare3)
                    mandates3 = data[3].text
                    seat.extend(mandates3)
            col4 = container.find_all('tr', {'data-id': '26091'})
            for info in col4:
                col_4 = info.find_all('td', recursive=True)
                for data in col_4:
                    party4 = data[0]
                    party_trans4 = translator.translate(party4)
                    pty_n.extend(party_trans4)
                    pvotes4 = data[1]
                    pv1.extend(pvotes4)
                    pshare4 = data[2]
                    pvs1.extend(pshare4)
                    mandates4 = data[3]
                    seat.extend(mandates4)
            col5 = container.find_all('tr', {'data-id': '26073'})
            for info in col5:
                col_5 = info.find_all('td')
                for data in col_5:
                    party5 = data[0]
                    party_trans5 = translator.translate(party5)
                    pty_n.extend(party_trans5)
                    pvotes5 = data[1]
                    pv1.extend(pvotes5)
                    pshare5 = data[2]
                    pvs1.extend(pshare5)
                    mandates5 = data[3]
                    seat.extend(mandates5)
            col6 = container.find_all('tr', {'data-id': '26080'})
            for info in col6:
                col_6 = info.find_all('td')
                for data in col_6:
                    party6 = data[0]
                    party_trans6 = translator.translate(party6)
                    pty_n.extend(party_trans6)
                    pvotes6 = data[1]
                    pv1.extend(pvotes6)
                    pshare6 = data[2]
                    pvs1.extend(pshare6)
                    mandates6 = data[3]
                    seat.extend(mandates6)
        #### TOTAL VOTES ####
        tfoot = soup.find_all('tfoot')
        for data in tfoot:
            fvote = data.find_all('td')
            for info in fvote:
                votefinal = info.find(text=True).get_text()
                fvoteindiv = [votefinal]
                fvotelist = fvoteindiv * (len(pty_n) - len(vot1))
                vot1.extend(fvotelist)
        #### CONSTITUENCY NAMES ####
        constit = soup.find_all('a', class_='btn btn-link last')
        for data in constit:
            names = data.get_text()
            names_clean = names.replace("Sejum Constituency no.", "")
            names_clean2 = names_clean.replace("[", "")
            names_clean3 = names_clean2.replace("]", "")
            namesfinal = names_clean3.split()[1]
            constitindiv = [namesfinal]
            constitlist = constitindiv * (len(pty_n) - len(cst_n))
            cst_n.extend(constitlist)
        #### UNSCRAPABLE INFO ####
        region = 'Europe'
        reg2 = [region]
        reglist = reg2 * (len(pty_n) - len(rg))
        rg.extend(reglist)
        country = 'Poland'
        ctr2 = [country]
        ctrlist = ctr2 * (len(pty_n) - len(ctr_n))
        ctr_n.extend(ctrlist)
        year = '2019'
        yr2 = [year]
        yrlist = yr2 * (len(pty_n) - len(yr))
        yr.extend(yrlist)
        month = '10'
        mo2 = [month]
        molist = mo2 * (len(pty_n) - len(mn))
        mn.extend(molist)
        codes = ''
        codes2 = [codes]
        codeslist = codes2 * (len(pty_n) - len(manual))
        manual.extend(codeslist)
        noinfo = '-990'
        noinfo2 = [noinfo]
        noinfolist = noinfo2 * (len(pty_n) - len(no_info))
        no_info.extend(noinfolist)

print(len(rg), len(pty_n), len(pv1), len(pvs1), len(no_info), len(vot1), len(cst_n))

poland19 = pd.DataFrame({
    'rg': rg,
    'ctr_n': ctr_n,
    'ctr': manual,
    'yr': yr,
    'mn': mn,
    'sub': manual,
    'cst_n': cst_n,
    'cst': manual,
    'mag': manual,
    'pty_n': pty_n,
    'pty': manual,
    'can': can,
    'pev1': no_info,
    'vot1': vot1,
    'vv1': vot1,
    'ivv1': no_info,
    'to1': no_info,
    'cv1': no_info,
    'cvs1': no_info,
    'pv1': cv1,
    'pvs1': cvs1,
    'pev2': no_info,
    'vot2': no_info,
    'vv2': no_info,
    'ivv2': no_info,
    'to2': no_info,
    'cv2': no_info,
    'cvs2': no_info,
    'pv2': no_info,
    'pvs2': no_info,
    'seat': manual
})

print(poland19)
poland19.to_csv('poland_19.csv')
As commented, you probably need to use Selenium. You could replace the requests lib and the requests statements with something like this:
from selenium import webdriver
wd = webdriver.Chrome('pathToChromeDriver') # or any other Browser driver
wd.get(url) # instead of requests.get()
soup = BeautifulSoup(wd.page_source, 'html.parser')
You need to follow the instructions to install and set up the selenium lib at this link: https://selenium-python.readthedocs.io/
Note: I tested your code with selenium and I was able to get the table that you were looking for, but for some reason the lookup with class_=... does not work.
Instead, browsing the scraped data, I found that the table has an id attribute. So maybe also try this instead:
tbody = soup.find_all('table', id="DataTables_Table_0")
And again, do the GET requests with the selenium lib.
Hope that was helpful :)
Cheers
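Putting those two suggestions together, a rough sketch of the page loop might look like this; the driver path, the fixed sleep, and applying the id-based lookup to every page are assumptions rather than tested details:

from time import sleep
from bs4 import BeautifulSoup
from selenium import webdriver

wd = webdriver.Chrome('pathToChromeDriver')  # point this at your chromedriver
tables = []
for page_no in range(1, 43):  # the question's START_PAGE..END_PAGE range
    wd.get("https://sejmsenat2019.pkw.gov.pl/sejmsenat2019/en/wyniki/sejm/okr/" + str(page_no))
    sleep(2)  # crude wait for the JavaScript-rendered table; an explicit wait would be sturdier
    soup = BeautifulSoup(wd.page_source, 'html.parser')
    tables.extend(soup.find_all('table', id='DataTables_Table_0'))
wd.quit()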
