driver = webdriver.Chrome(ChromeDriverManager().install())
url = 'www.mywebsite.com'
driver.get(url)
response = requests.get(url)
markup = driver.page_source
soup = BeautifulSoup(markup, 'lxml')

for _ in range(50):
    driver.find_element_by_tag_name('body').send_keys(Keys.END)  # Move the page down
    element = driver.find_element_by_class_name('prevnext')
    master_list = []
    for name in soup.find_all(itemprop='name'):
        data_dict = {}
        data_dict['company name'] = name.get_text(strip=True, separator='\n')
        master_list.append(data_dict)
    df = pd.DataFrame(master_list)
    print('Page scraped')
    time.sleep(5)
    print('Sleeping for 2..')
    print('Is the button enabled : ' + str(element.is_enabled()))
    print('Is the button visible : ' + str(element.is_displayed()))
    element.click()
    print('Clicked Next')
    driver.implicitly_wait(2)
    # # for _ in range(1):
    # #     print('waiting 10')
    # driver.find_element_by_class_name('submit-btn').click()

print('Finished Scraping')
I need this to run through 50 pages. It scrapes the first one and flips through the other ones; however, at the end only the first page is scraped and added to df. Every page has 20 records. I believe my indentation is wrong. Any help appreciated.
It seems you made a small mistake.
markup = driver.page_source
soup = BeautifulSoup(markup, 'lxml')
Remove these two lines from where they are now and move them to the start of your for loop: you need to re-read the page source on every iteration, because new content is loaded each time you click Next.
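As a rough sketch of where those lines would go (element names such as prevnext and the itemprop='name' lookup are taken from the question's own code, and the same imports are assumed):

master_list = []
for _ in range(50):
    driver.find_element_by_tag_name('body').send_keys(Keys.END)   # scroll down
    markup = driver.page_source                                   # re-read the DOM for the current page
    soup = BeautifulSoup(markup, 'lxml')
    for name in soup.find_all(itemprop='name'):
        master_list.append({'company name': name.get_text(strip=True, separator='\n')})
    driver.find_element_by_class_name('prevnext').click()         # go to the next page
    time.sleep(5)                                                  # give the next page time to load

df = pd.DataFrame(master_list)                                     # build the frame once, at the end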
My code below will open a website, scrape values into an array, and plot them. Notice at the bottom that one can comment out "driver.quit()" so that when the Python code stops, the webpage of interest is still open. A short time later, I would like to soft-start the Python code and continue reading from the website. My attempt was to print out the value of the driver and jump straight to that value without having to open a new page. Once I am on the welcome page it takes a lot of time/effort to get to the desired page, and I would like to avoid that. Look at the third line of code, where I have pasted the value of the driver for the session that is currently open; Python does not like that. Is there a way to continue on that session from Python while it is open?
driver = webdriver.Chrome(executable_path="C:\chromedriver.exe")
driver.get("https://my_example.com/welcome")
# driver = <selenium.webdriver.chrome.webdriver.WebDriver (session="1a636e51f3d40bd9b66996e3d52d945b")>

my_name = ["nm25", "nm26", "nm27", "nm33", "nm38", "nm41", "nm45", ]
data_points = 450
my_file = np.zeros((13, 7, data_points))
x = []
y = []

soup = "chicken"  # Initialization constant
while soup.find(my_name[0]) == -1:
    source = driver.page_source
    soup = BeautifulSoup(source, "lxml")
    soup = (soup.get_text())
    time.sleep(5)

tim = time.time()
my_cntr = 0
plt.title("Title")
plt.xlabel(" Time")
plt.ylabel("y axis amplitude")

for i in range(data_points):
    source = driver.page_source
    soup = BeautifulSoup(source, "lxml")  # was "html.parser"
    soup3 = (soup.get_text())
    soup2 = (soup.get_text("**", strip=True))  # adds "**" between values for ease of reading
    x_pos = soup2.find(my_name[0])  # Remove all but First name
    soup2 = soup2[x_pos-2:]  # Data starts with "**"
    for j in range(len(my_name)):  # Go through all of the names
        for k in range(0, 13):  # The number of values to read per name
            soup2 = soup2[2:]  # Remove **
            x_pos = soup2.find('**')
            if k < 2 or k == 6:
                my_file[k, j, i] = time.time() - tim  # adds time stamp
            else:
                my_file[k, j, i] = soup2[:x_pos]
            soup2 = soup2[x_pos:]
            if (k == 7) and j == 0:
                x.append(my_file[0, 0, i])
                y.append(my_file[7, 0, i])
    my_cntr = my_cntr + 1
    if my_cntr/20 == int(my_cntr/20):
        plt.plot(x, y)
        plt.pause(0.2)

plt.show()
driver.quit()  # remove this line to leave the browser open
Try getting the session id and setting the value. You should probably save the session id in a config.py file, a JSON file, a txt file, etc., and access it later.
driver = webdriver.Chrome(executable_path="C:\chromedriver.exe")
driver.get("https://my_example.com/welcome")

# do stuff and then save the session id at the end
session_id = driver.session_id

import json

# save the session id in a json file
with open("sample.json", "w") as f:
    f.write(json.dumps({'session_id': session_id}))
Once the code finishes running and the Selenium browser is still open, you can run another .py file that reads the session id you just saved in the JSON file, so you can access the open browser again:
import json

# open the json file
with open('sample.json', 'r') as f:
    session_id = json.load(f)['session_id']

driver.session_id = session_id
# do more stuff
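Note that the second snippet still needs a WebDriver client object to assign the session id to. As a hedged sketch only (not part of the answer above): one commonly cited workaround is to also save the command executor URL from the first run and re-attach with webdriver.Remote. This relies on non-public behaviour (command_executor._url, reassigning session_id) and may break between Selenium versions.

import json
from selenium import webdriver

# In the first script you would also need to save the executor URL, e.g.:
#   with open("sample.json", "w") as f:
#       json.dump({"session_id": driver.session_id,
#                  "executor_url": driver.command_executor._url}, f)

with open("sample.json") as f:
    saved = json.load(f)

# Creating a Remote driver opens a throwaway session/window first...
driver = webdriver.Remote(command_executor=saved["executor_url"],
                          options=webdriver.ChromeOptions())
driver.close()                             # ...close that extra window
driver.session_id = saved["session_id"]    # then point the client at the old session

print(driver.current_url)                  # should be the page left open earlier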
I scrape data from this url, then click the Next button and wait 10 seconds before using requests and bs4 to scrape the next page, but the url doesn't change, so I just end up scraping the original page's data twice. I've tried WebDriverWait until elements on the first page become stale, as well as using requests to hit the XHR API call directly (I am not well-versed in AJAX, however), and can't find a solution. Here is the code as it stands:
loop = True
while loop:
    try:
        current_url = driver.current_url
        next_btn = WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, '//button[text()="Next"]')))
        actions = ActionChains(driver)
        actions.move_to_element(next_btn).perform()
        if next_btn:
            next_btn.click()
    except Exception as e:
        current_url = driver.current_url
        loop = False
        print(e, f"somewhere in {current_url} while loop")
    else:
        time.sleep(10)
        next_page = driver.current_url
        get_page_content(next_page)
        break
Here is the URL of the first page: https://www.hunterdouglas.com/locator/results?address=San%20Ramon&country=US&source=
Any direction would be appreciated! Thank you!
For anyone who is interested, I got this to work using just Selenium. Here is the code; the argument data is just the name of the city I'm submitting to master_function(data). A sketch of how it might be wired up follows the code.
def get_links(page):
    for p in page:
        for l in p.find_elements_by_tag_name("a"):
            link = l.get_attribute('href')
            if link != None:
                link_split = link.split('\n')
                for l in link_split:
                    if "http" in link:
                        test_list.append(link)

def master_function(data):
    for d in data:
        base_url = "https://www.hunterdouglas.com/locator"
        driver.get(base_url)
        url = pop_up_one(driver)
        submit(url, driver, d)
        loop = True
        while loop:
            try:
                current_url = driver.current_url
                next_btn = WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, '//button[text()="Next"]')))
                actions = ActionChains(driver)
                actions.move_to_element(next_btn).perform()
                if next_btn:
                    next_btn.click()
            except Exception as e:
                current_url = driver.current_url
                loop = False
                print(e, f"somewhere in {current_url} while loop")
            else:
                time.sleep(1)
                page = WebDriverWait(driver, 5).until(EC.presence_of_all_elements_located((By.XPATH, '//div[@id="loc-results"]')))
                get_links(page)
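The snippet above relies on helpers that are not shown (pop_up_one and submit) and on a pre-existing driver and test_list. Purely as a hypothetical wiring example, not part of the original answer, it could be driven like this:

from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
import time

test_list = []                    # collects the scraped hrefs
driver = webdriver.Chrome()

master_function(["San Ramon"])    # data is a list of city names
print(len(test_list), "links collected")
driver.quit()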
Very new to Python and Selenium, looking to scrape a few data points. I'm struggling in three areas:
I don't understand how to loop through multiple URLs properly
I can't figure out why the script is iterating twice over each URL
I can't figure out why it's only outputting the data for the second URL
Much thanks for taking a look!
Here's my current script:
urls = [
    'https://developers.google.com/speed/pagespeed/insights/?url=https://www.crutchfield.com/%2F&tab=mobile',
    'https://developers.google.com/speed/pagespeed/insights/?url=https://www.lastpass.com%2F&tab=mobile'
]

driver = webdriver.Chrome(executable_path='/Library/Frameworks/Python.framework/Versions/3.9/bin/chromedriver')

for url in urls:
    for page in range(0, 1):
        driver.get(url)
        wait = WebDriverWait(driver, 120).until(EC.presence_of_element_located((By.CLASS_NAME, 'origin-field-data')))
        df = pd.DataFrame(columns=['Title', 'Core Web Vitals', 'FCP', 'FID', 'CLS', 'TTI', 'TBT', 'Total Score'])
        company = driver.find_elements_by_class_name("audited-url__link")

        data = []
        for i in company:
            data.append(i.get_attribute('href'))

        for x in data:
            # Get URL name
            title = driver.find_element_by_xpath('//*[@id="page-speed-insights"]/div[2]/div[3]/div[2]/div[1]/div[1]/div/div[2]/h1/a')
            co_name = title.text
            # Get Core Web Vitals text pass/fail
            cwv = driver.find_element_by_xpath('//*[@id="page-speed-insights"]/div[2]/div[3]/div[2]/div[1]/div[2]/div/div[1]/div[1]/div[1]/span[2]')
            core_web = cwv.text
            # Get FCP
            fcp = driver.find_element_by_xpath('//*[@id="page-speed-insights"]/div[2]/div[3]/div[2]/div[1]/div[2]/div/div[1]/div[1]/div[2]/div[1]/div[1]/div')
            first_content = fcp.text
            # Get FID
            fid = driver.find_element_by_xpath('//*[@id="page-speed-insights"]/div[2]/div[3]/div[2]/div[1]/div[2]/div/div[1]/div[1]/div[2]/div[3]/div[1]/div')
            first_input = fid.text
            # Get CLS
            cls = driver.find_element_by_xpath('//*[@id="page-speed-insights"]/div[2]/div[3]/div[2]/div[1]/div[2]/div/div[1]/div[1]/div[2]/div[4]/div[1]/div')
            layout_shift = cls.text
            # Get TTI
            tti = driver.find_element_by_xpath('//*[@id="interactive"]/div/div[1]')
            time_interactive = tti.text
            # Get TBT
            tbt = driver.find_element_by_xpath('//*[@id="total-blocking-time"]/div/div[1]')
            total_block = tbt.text
            # Get Total Score
            total_score = driver.find_element_by_xpath('//*[@id="page-speed-insights"]/div[2]/div[3]/div[2]/div[1]/div[1]/div/div[1]/a/div[2]')
            score = total_score.text

            # Adding all columns to dataframe
            df.loc[len(df)] = [co_name, core_web, first_content, first_input, layout_shift, time_interactive, total_block, score]

driver.close()
# df.to_csv('Double Page Speed Test 9-10.csv')
print(df)
Q1: I don't understand how to loop through multiple URLs properly?
Ans: for url in urls:
Q2: I can't figure out why the script is iterating twice over each URL.
Ans: Because you have for page in range(0, 1):
Update 1:
I did not run your entire code with the DataFrame. Also, sometimes one of the pages does not show the number and href, but when I run the code below,
driver = webdriver.Chrome(driver_path)
driver.maximize_window()
driver.implicitly_wait(50)
wait = WebDriverWait(driver, 20)

urls = [
    'https://developers.google.com/speed/pagespeed/insights/?url=https://www.crutchfield.com/%2F&tab=mobile',
    'https://developers.google.com/speed/pagespeed/insights/?url=https://www.lastpass.com%2F&tab=mobile'
]

data = []
for url in urls:
    driver.get(url)
    wait = WebDriverWait(driver, 120).until(EC.presence_of_element_located((By.CLASS_NAME, 'origin-field-data')))
    company = driver.find_elements_by_css_selector("h1.audited-url a")
    for i in company:
        data.append(i.get_attribute('href'))

print(data)
This outputs:

['https://www.crutchfield.com//', 'https://www.lastpass.com/', 'https://www.lastpass.com/']

which is correct, because the element locator we have used matches 1 element on the first page and 2 elements on the second page.
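A side note that is not in the original answer: the asker's third problem (output only for the second URL) comes from re-creating df inside the URL loop, so only the last frame survives to the final print. A minimal sketch of the alternative structure, with the per-page element lookups abbreviated into a placeholder helper, is:

import pandas as pd

COLUMNS = ['Title', 'Core Web Vitals', 'FCP', 'FID', 'CLS', 'TTI', 'TBT', 'Total Score']

def scrape_metrics(driver):
    # Placeholder for the question's find_element_by_xpath calls;
    # it should return one list of values matching COLUMNS.
    ...

rows = []
# for url in urls:
#     driver.get(url)
#     rows.append(scrape_metrics(driver))

df = pd.DataFrame(rows, columns=COLUMNS)   # built once, after all URLs are visited
print(df)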
I am a student working on a scraping project and I am having trouble completing my script because it fills my computer's memory with all of the data it stores.
It currently stores all of my data until the end, so my solution to this would be to break up the scrape into smaller bits and then write out the data periodically so it does not just continue to make one big list and then write out at the end.
In order to do this, I would need to stop my scroll method, scrape the loaded profiles, write out the data that I have collected, and then repeat this process without duplicating my data. It would be appreciated if someone could show me how to do this. Thank you for your help :)
Here's my current code:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
from selenium.common.exceptions import NoSuchElementException

Data = []

driver = webdriver.Chrome()
driver.get("https://directory.bcsp.org/")
count = int(input("Number of Pages to Scrape: "))
body = driver.find_element_by_xpath("//body")
profile_count = driver.find_elements_by_xpath("//div[@align='right']/a")

while len(profile_count) < count:  # Get links up to "count"
    body.send_keys(Keys.END)
    sleep(1)
    profile_count = driver.find_elements_by_xpath("//div[@align='right']/a")

for link in profile_count:  # Calling up links
    temp = link.get_attribute('href')  # temp for
    driver.execute_script("window.open('');")  # open new tab
    driver.switch_to.window(driver.window_handles[1])  # focus new tab
    driver.get(temp)

    # scrape code
    Name = driver.find_element_by_xpath('/html/body/table/tbody/tr/td/table/tbody/tr/td[5]/div/table[1]/tbody/tr/td[1]/div[2]/div').text
    IssuedBy = "Board of Certified Safety Professionals"
    CertificationorDesignaationNumber = driver.find_element_by_xpath('/html/body/table/tbody/tr/td/table/tbody/tr/td[5]/div/table[1]/tbody/tr/td[3]/table/tbody/tr[1]/td[3]/div[2]').text
    CertfiedorDesignatedSince = driver.find_element_by_xpath('/html/body/table/tbody/tr/td/table/tbody/tr/td[5]/div/table[1]/tbody/tr/td[3]/table/tbody/tr[3]/td[1]/div[2]').text
    try:
        AccreditedBy = driver.find_element_by_xpath('/html/body/table/tbody/tr/td/table/tbody/tr/td[5]/div/table[1]/tbody/tr/td[3]/table/tbody/tr[5]/td[3]/div[2]/a').text
    except NoSuchElementException:
        AccreditedBy = "N/A"
    try:
        Expires = driver.find_element_by_xpath('/html/body/table/tbody/tr/td/table/tbody/tr/td[5]/div/table[1]/tbody/tr/td[3]/table/tbody/tr[5]/td[1]/div[2]').text
    except NoSuchElementException:
        Expires = "N/A"

    info = Name, IssuedBy, CertificationorDesignaationNumber, CertfiedorDesignatedSince, AccreditedBy, Expires + "\n"
    Data.extend(info)
    driver.close()
    driver.switch_to.window(driver.window_handles[0])

with open("Spredsheet.txt", "w") as output:
    output.write(','.join(Data))
driver.close()
Try the below approach using requests and BeautifulSoup. In the below script I have used the API URL fetched from the website itself (the search_results.php URL you can see in the script).
On the first iteration it will create the URL, add the headers, and put the data in the .csv file.
On each following iteration it will again create the URL with 2 extra params, start_on_page and show_per_page, where start_on_page starts at 20 and is incremented by 20 on each iteration, and show_per_page defaults to 100 so that 100 records are extracted per iteration, and so on until all the data has been dumped into the .csv file.
The script dumps 4 things: number, name, location and profile URL.
On each iteration the data is appended to the .csv file, so your memory issue will be resolved by this approach.
Do not forget to set your system path in the file_path variable (where you want the .csv file to be created) before running the script.
import requests
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from bs4 import BeautifulSoup as bs
import csv

def scrap_directory_data():
    list_of_credentials = []
    file_path = ''
    file_name = 'credential_list.csv'
    count = 0
    page_number = 0
    page_size = 100
    create_url = ''
    main_url = 'https://directory.bcsp.org/search_results.php?'
    first_iteration_url = 'first_name=&last_name=&city=&state=&country=&certification=&unauthorized=0&retired=0&specialties=&industries='
    number_of_records = 0
    csv_headers = ['#', 'Name', 'Location', 'Profile URL']

    while True:
        if count == 0:
            create_url = main_url + first_iteration_url
            print('-' * 100)
            print('1 iteration URL created: ' + create_url)
            print('-' * 100)
        else:
            create_url = main_url + 'start_on_page=' + str(page_number) + '&show_per_page=' + str(page_size) + '&' + first_iteration_url
            print('-' * 100)
            print('Other then first iteration URL created: ' + create_url)
            print('-' * 100)

        page = requests.get(create_url, verify=False)
        extracted_text = bs(page.text, 'lxml')
        result = extracted_text.find_all('tr')
        if len(result) > 0:
            for idx, data in enumerate(result):
                if idx > 0:
                    number_of_records += 1
                    name = data.contents[1].text
                    location = data.contents[3].text
                    profile_url = data.contents[5].contents[0].attrs['href']
                    list_of_credentials.append({
                        '#': number_of_records,
                        'Name': name,
                        'Location': location,
                        'Profile URL': profile_url
                    })
                print(data)
                with open(file_path + file_name, 'a+') as cred_CSV:
                    csvwriter = csv.DictWriter(cred_CSV, delimiter=',', lineterminator='\n', fieldnames=csv_headers)
                    if idx == 0 and count == 0:
                        print('Writing CSV header now...')
                        csvwriter.writeheader()
                    else:
                        for item in list_of_credentials:
                            print('Writing data rows now..')
                            print(item)
                            csvwriter.writerow(item)
                        list_of_credentials = []
        count += 1
        page_number += 20

scrap_directory_data()
I have found many references that scroll the entire webpage, but I am looking to scroll only a particular section. I am working on marketwatch.com, specifically the Latest News tab. How can I scroll just this Latest News tab using the Selenium webdriver?
Below is my code, which returns the news headings but keeps repeating the same headings.
from bs4 import BeautifulSoup
import urllib
import csv
import time
from selenium import webdriver

count = 0
browser = webdriver.Chrome()
browser.get("https://www.marketwatch.com/newsviewer")
pageSource = browser.page_source
soup = BeautifulSoup(pageSource, 'lxml')
arkodiv = soup.find("ol", class_="viewport")

while browser.find_element_by_tag_name('ol'):
    browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(0.5)
    div = list(arkodiv.find_all('div', class_="nv-details"))
    heading = []
    Data_11 = list(soup.find_all("div", class_="nv-text-cont"))
    datetime = list(arkodiv.find_all("li", timestamp=True))
    for sa in datetime:
        sh = sa.find("div", class_="nv-text-cont")
        if sh.find("a", class_=True):
            di = sh.text.strip()
            di = di.encode('ascii', 'ignore').decode('ascii')
        else:
            continue
        print di
        heading.append((di))
        count = count + 1
    if 'End of Results' in arkodiv:
        print 'end'
        break
    else:
        continue

print count
That happens because the script you are executing scrolls to the bottom of the page.
To keep scrolling inside the element that holds the news feed, you need to replace this:
browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
with this:
browser.execute_script("document.documentElement.getElementsByClassName('viewport')[0].scrollTop = 999999")
EDIT
This is the complete working solution:
from bs4 import BeautifulSoup
import urllib
import csv
import time
from selenium import webdriver

count = 0
browser = webdriver.Chrome()
browser.get("https://www.marketwatch.com/newsviewer")

while browser.find_element_by_tag_name('ol'):
    pageSource = browser.page_source
    soup = BeautifulSoup(pageSource, 'lxml')
    arkodiv = soup.find("ol", class_="viewport")
    browser.execute_script("document.documentElement.getElementsByClassName('viewport')[0].scrollTop = 999999")
    time.sleep(0.5)
    div = list(arkodiv.find_all('div', class_="nv-details"))
    heading = set()
    Data_11 = list(soup.find_all("div", class_="nv-text-cont"))
    datetime = list(arkodiv.find_all("li", timestamp=True))
    for sa in datetime:
        sh = sa.find("div", class_="nv-text-cont")
        if sh.find("a", class_=True):
            di = sh.text.strip()
            di = di.encode('ascii', 'ignore').decode('ascii')
        else:
            continue
        print di
        heading.add((di))
        count = count + 1
    if 'End of Results' in arkodiv:
        print 'end'
        break
    else:
        continue

print count
EDIT 2
You may also want to change how you store the headings, since the way you currently do it keeps duplicates inside the list. I changed it to a set so that doesn't happen.