I am trying to create a basic web scraper that takes input from the user to search for a video on YouTube and then writes x number of titles to a file. I have tried using the ID of the video title, XPATH, and CSS_SELECTOR, but am unable to get the program to fully do what I want it to. I either get stuck in the automated browser (it doesn't quit) or I am unable to write the web element to the file. This is my current code:
# Importing the necessary modules for the program to run.
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver import ActionChains
from selenium import webdriver
# This is the first part of the program. It asks the user for a search query and how many results they
# want to see.
request = input('Search for: ')
split_request = request.split()
url = 'https://www.Youtube.com/results?search_query=' + '+'.join(split_request) + '+'
query_amount = int(input('How many results do you want to see? '))
driver = webdriver.Safari()
actions = ActionChains(driver)
driver.get(url)
i = 0
# Creating a new file called 'YouTube Search Results.txt' and opening it in write mode.
with open('YouTube Search Results.txt', mode='w') as file:
    pass
try:
    # Waiting for the elements with the ID 'video-title' to load.
    WebDriverWait(driver, 20).until(EC.presence_of_all_elements_located((By.XPATH, '//*[@id="video-title"]/yt-formatted-string/text()')))
    # This is the part of the program that scrolls down the page to load more results.
    query = (driver.find_elements(By.XPATH, '//*[@id="video-title"]/yt-formatted-string/text()'))
    while i < query_amount:
        actions.scroll_by_amount(0, 10)
        query = (driver.find_elements(By.XPATH, '//*[@id="video-title"]/yt-formatted-string/text()'))
        i = len(query)
    # Counting the number of results and breaking the loop when the number of results is greater than
    # the amount of results the user wants to see.
    count = 0
    for title in query:
        if count > query_amount:
            break
        # This is the part of the program that writes the results to the file.
        else:
            count += 1
            with open('YouTube Search Results.txt', mode='a') as file:
                file.write(title.text + '\n')
finally:
    driver.quit()
I believe the issue is in the last block but I am unable to figure it out.
Thanks in advance for your help!
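A minimal, untested sketch of a title loop that avoids both problems, assuming Selenium 4 and YouTube's current a#video-title element: it drops the trailing /text() (find_elements must return elements, not text nodes), scrolls with execute_script, and reads .text before writing.
titles = []
while len(titles) < query_amount:
    # scroll to trigger YouTube's lazy loading of more results
    driver.execute_script("window.scrollBy(0, 1000);")
    titles = driver.find_elements(By.XPATH, '//a[@id="video-title"]')
with open('YouTube Search Results.txt', mode='w') as file:
    for title in titles[:query_amount]:
        file.write(title.text + '\n')
driver.quit()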
I would appreciate some help with Selenium.
I am trying to fill in a Google Form for several entries: input the first row of a df, then click "Submit", open a new form, and run again for the second row, and so on to the n-th row.
I got stuck with a NoSuchFrameException: Unable to locate frame with index error after the first entry. The Selenium docs say you can locate a window's frames from the console, but that gives me nothing (F12 --> search for "frame", any combination tried --> no matches). There is no such thing in the Google Form (or my search is simply wrong).
Since I couldn't find anything on the issue, I tried frame(0) - no luck.
Any tips would be appreciated. The whole code is below:
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
import time
import pandas as pd
options = Options()
options.binary_location = FirefoxBinary(r"C:\Program Files\Mozilla Firefox\firefox.exe")
driver = webdriver.Firefox(executable_path=r'C:\WebDriver\bin\geckodriver.exe', firefox_options=options)
driver.implicitly_wait(10)
reg = pd.read_csv(r'C:\Users\User\Desktop\Form.csv', header=0, delimiter=';', sep=r'\s*;\s*')
reg_2 = reg.values.tolist()
driver.get('https://docs.google.com/forms/d/e/1FAIpQLSd9FQ33H5SMHelf9O1jjHl7FtLTtaTdFuC4dUFv-educaFiJA/viewform?vc=0&c=0&w=1&flr=0&gxids=7628')
try:
    for row in reg_2:
        element_count = 0
        for element in range(len(row)):
            first = driver.find_element_by_xpath("/html/body/div/div[2]/form/div[2]/div/div[2]/div[2]/div/div/div[2]/div/div[1]/div/div[1]/input")
            last = driver.find_element_by_xpath("/html/body/div/div[2]/form/div[2]/div/div[2]/div[1]/div/div/div[2]/div/div[1]/div/div[1]/input")
            mail = driver.find_element_by_xpath("/html/body/div/div[2]/form/div[2]/div/div[2]/div[3]/div/div/div[2]/div/div[1]/div/div[1]/input")
            last.send_keys(row[0])
            first.send_keys(row[1])
            mail.send_keys(row[2])
            submit = driver.find_element_by_xpath('//*[@id="mG61Hd"]/div[2]/div/div[3]/div[1]/div/div/span/span')
            submit.click()
            time.sleep(3)
            element_count += 1
            driver.switch_to.frame(0)
            driver.switch_to.default_content()
finally:
    driver.quit()
Remove these two lines of code; the Google Form doesn't have any iframe in it:
driver.switch_to.frame(0)
driver.switch_to.default_content()
If you want to submit again, click the "Submit another response" link:
driver.find_element_by_xpath('//a[contains(text(),"Submit another response")]').click()
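If the confirmation page hasn't rendered yet when you click, an explicit wait is safer than a fixed sleep; a sketch using the same link text:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# wait up to 10 s for the confirmation page's link, then click it
WebDriverWait(driver, 10).until(
    EC.element_to_be_clickable((By.XPATH, '//a[contains(text(),"Submit another response")]'))
).click()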
Well, in fact, after deleting the old form and starting anew, the thing worked in the end. I had to change several other elements, and also added password and password_confirm fields:
while len(reg_2) > element_count:
    try:
        for row in reg_2:
            first = driver.find_element_by_xpath("/html/body/div/div[2]/form/div[2]/div/div[2]/div[2]/div/div/div[2]/div/div[1]/div/div[1]/input")
            last = driver.find_element_by_xpath("/html/body/div/div[2]/form/div[2]/div/div[2]/div[1]/div/div/div[2]/div/div[1]/div/div[1]/input")
            mail = driver.find_element_by_xpath("/html/body/div/div[2]/form/div[2]/div/div[2]/div[3]/div/div/div[2]/div/div[1]/div/div[1]/input")
            password = driver.find_element_by_xpath('/html/body/div/div[2]/form/div[2]/div/div[2]/div[4]/div/div/div[2]/div/div[1]/div/div[1]/input')
            password_confirm = driver.find_element_by_xpath('/html/body/div/div[2]/form/div[2]/div/div[2]/div[5]/div/div/div[2]/div/div[1]/div/div[1]/input')
            last.send_keys(row[0])
            first.send_keys(row[1])
            mail.send_keys(row[2])
            password.send_keys(row[3])
            password_confirm.send_keys(row[3])
            submit = driver.find_element_by_xpath('//*[@id="mG61Hd"]/div[2]/div/div[3]/div[1]/div/div/span/span')
            submit.click()
            time.sleep(3)
            element_count += 1
            #driver.find_element_by_xpath('/html/body/div[1]/div[2]/div[1]/div/div[4]/a').click()
            driver.find_element_by_css_selector('.freebirdFormviewerViewResponseLinksContainer > a:nth-child(1)').click()
    finally:
        driver.quit()
My purpose is to download a zip file from https://www.shareinvestor.com/prices/price_download_zip_file.zip?type=history_all&market=bursa
It is a link on this webpage: https://www.shareinvestor.com/prices/price_download.html#/?type=price_download_all_stocks_bursa. I then want to save it into the directory "/home/vinvin/shKLSE/" (I am using PythonAnywhere), then unzip it so the csv file is extracted in that directory.
The code runs to the end with no error, but nothing is downloaded.
The zip file is downloaded automatically when https://www.shareinvestor.com/prices/price_download_zip_file.zip?type=history_all&market=bursa is clicked manually.
My code uses a working username and password; the real credentials are included so that it is easier to reproduce the problem.
#!/usr/bin/python
print "hello from python 2"
import urllib2
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from pyvirtualdisplay import Display
import requests, zipfile, os
display = Display(visible=0, size=(800, 600))
display.start()
profile = webdriver.FirefoxProfile()
profile.set_preference('browser.download.folderList', 2)
profile.set_preference('browser.download.manager.showWhenStarting', False)
profile.set_preference('browser.download.dir', "/home/vinvin/shKLSE/")
profile.set_preference('browser.helperApps.neverAsk.saveToDisk', '/zip')
for retry in range(5):
    try:
        browser = webdriver.Firefox(profile)
        print "firefox"
        break
    except:
        time.sleep(3)
time.sleep(1)
browser.get("https://www.shareinvestor.com/my")
time.sleep(10)
login_main = browser.find_element_by_xpath("//*[@href='/user/login.html']").click()
print browser.current_url
username = browser.find_element_by_id("sic_login_header_username")
password = browser.find_element_by_id("sic_login_header_password")
print "find id done"
username.send_keys("bkcollection")
password.send_keys("123456")
print "log in done"
login_attempt = browser.find_element_by_xpath("//*[@type='submit']")
login_attempt.submit()
browser.get("https://www.shareinvestor.com/prices/price_download.html#/?type=price_download_all_stocks_bursa")
print browser.current_url
time.sleep(20)
dl = browser.find_element_by_xpath("//*[@href='/prices/price_download_zip_file.zip?type=history_all&market=bursa']").click()
time.sleep(30)
browser.close()
browser.quit()
display.stop()
zip_ref = zipfile.ZipFile('/home/vinvin/sh/KLSE', 'r')
zip_ref.extractall('/home/vinvin/sh/KLSE')
zip_ref.close()
os.remove(zip_ref)
HTML snippet:
<li><a href="/prices/price_download_zip_file.zip?type=history_all&amp;market=bursa">All Historical Data</a> <span>About 220 MB</span></li>
Note that &amp; is shown when I copy the snippet; it was hidden from view-source, so I guess it is written by JavaScript.
Observations I found:
The directory /home/vinvin/shKLSE is not created, even though I run the code with no error.
I tried to download a much smaller zip file, which completes in a second, but it still does not download after a 30 s wait: dl = browser.find_element_by_xpath("//*[@href='/prices/price_download_zip_file.zip?type=history_daily&date=20170519&market=bursa']").click()
I don't see any major drawback in your code block as such, but here are a few recommendations based on this solution and the execution of this automated test script:
This code works perfectly during off-market hours. During market hours a lot of JavaScript and Ajax calls are in play, and handling those is beyond the scope of this question.
You may consider checking for the intended download directory first and, if it is not available, creating a new one. The code block for this functionality is in Windows style and works fine on the Windows platform.
Once you click on "Login", induce some wait for the HTML DOM to render properly.
To see off the downloading process, you need to set a few more preferences in the FirefoxProfile, as mentioned in my code below.
Always consider maximizing the browser window through browser.maximize_window().
When you start downloading, you need to wait a sufficient amount of time for the file to download completely.
If you are using browser.quit() at the end, you don't need browser.close().
You may consider replacing all the time.sleep() calls with an implicit, explicit, or fluent wait (see the sketch below).
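For instance, the time.sleep(20) before the download click could become an explicit wait; a sketch using the same XPath as the script below:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# block until the link is clickable (up to 20 s) instead of always sleeping 20 s
wait = WebDriverWait(browser, 20)
dl = wait.until(EC.element_to_be_clickable(
    (By.XPATH, "//*[@href='/prices/price_download_zip_file.zip?type=history_all&market=bursa']")))
dl.click()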
Here is your own code block with some simple tweaks in it:
#!/usr/bin/python
print "hello from python 2"
import urllib2
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from pyvirtualdisplay import Display
import requests, zipfile, os
display = Display(visible=0, size=(800, 600))
display.start()
newpath = 'C:\\home\\vivvin\\shKLSE'
if not os.path.exists(newpath):
    os.makedirs(newpath)
profile = webdriver.FirefoxProfile()
profile.set_preference("browser.download.dir",newpath);
profile.set_preference("browser.download.folderList",2);
profile.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/zip");
profile.set_preference("browser.download.manager.showWhenStarting",False);
profile.set_preference("browser.helperApps.neverAsk.openFile","application/zip");
profile.set_preference("browser.helperApps.alwaysAsk.force", False);
profile.set_preference("browser.download.manager.useWindow", False);
profile.set_preference("browser.download.manager.focusWhenStarting", False);
profile.set_preference("browser.helperApps.neverAsk.openFile", "");
profile.set_preference("browser.download.manager.alertOnEXEOpen", False);
profile.set_preference("browser.download.manager.showAlertOnComplete", False);
profile.set_preference("browser.download.manager.closeWhenDone", True);
profile.set_preference("pdfjs.disabled", True);
for retry in range(5):
    try:
        browser = webdriver.Firefox(profile)
        print "firefox"
        break
    except:
        time.sleep(3)
time.sleep(1)
browser.maximize_window()
browser.get("https://www.shareinvestor.com/my")
time.sleep(10)
login_main = browser.find_element_by_xpath("//*[@href='/user/login.html']").click()
time.sleep(10)
print browser.current_url
username = browser.find_element_by_id("sic_login_header_username")
password = browser.find_element_by_id("sic_login_header_password")
print "find id done"
username.send_keys("bkcollection")
password.send_keys("123456")
print "log in done"
login_attempt = browser.find_element_by_xpath("//*[@type='submit']")
login_attempt.submit()
browser.get("https://www.shareinvestor.com/prices/price_download.html#/?type=price_download_all_stocks_bursa")
print browser.current_url
time.sleep(20)
dl = browser.find_element_by_xpath("//*[@href='/prices/price_download_zip_file.zip?type=history_all&market=bursa']").click()
time.sleep(900)
browser.close()
browser.quit()
display.stop()
zip_ref = zipfile.ZipFile('/home/vinvin/sh/KLSE', 'r')
zip_ref.extractall('/home/vinvin/sh/KLSE')
zip_ref.close()
os.remove(zip_ref)
Let me know if this answers your question.
I rewrote your script, with comments explaining why I made the changes I made. I think your main problem might have been a bad mimetype; however, your script also had a lot of systemic issues that would have made it unreliable at best. This rewrite uses explicit waits, which completely removes the need to use time.sleep(), allowing it to run as fast as possible while also eliminating errors that arise from network congestion.
You will need to do the following to make sure all modules are installed:
pip install requests explicit selenium retry pyvirtualdisplay
The script:
#!/usr/bin/python
from __future__ import print_function  # Makes your code portable
import os
import glob
import zipfile
from contextlib import contextmanager

import requests
from retry import retry
from explicit import waiter, XPATH, ID
from selenium import webdriver
from pyvirtualdisplay import Display
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait

DOWNLOAD_DIR = "/tmp/shKLSE/"

def build_profile():
    profile = webdriver.FirefoxProfile()
    profile.set_preference('browser.download.folderList', 2)
    profile.set_preference('browser.download.manager.showWhenStarting', False)
    profile.set_preference('browser.download.dir', DOWNLOAD_DIR)
    # I think your `/zip` mime type was incorrect. This works for me
    profile.set_preference('browser.helperApps.neverAsk.saveToDisk',
                           'application/vnd.ms-excel,application/zip')
    return profile

# Retry is an elegant way to retry the browser creation
# Though you should narrow the scope to whatever the actual exception is you are
# retrying on
@retry(Exception, tries=5, delay=3)
@contextmanager  # This turns get_browser into a context manager
def get_browser():
    # Use a context manager with Display, so it will be closed even if an
    # exception is thrown
    profile = build_profile()
    with Display(visible=0, size=(800, 600)):
        browser = webdriver.Firefox(profile)
        print("firefox")
        try:
            yield browser
        finally:
            # Let a try/finally block manage closing the browser, even if an
            # exception is called
            browser.quit()

def main():
    print("hello from python 2")
    with get_browser() as browser:
        browser.get("https://www.shareinvestor.com/my")

        # Click the login button
        # waiter is a helper function that makes it easy to use explicit waits
        # with it you dont need to use time.sleep() calls at all
        login_xpath = '//*/div[@class="sic_logIn-bg"]/a'
        waiter.find_element(browser, login_xpath, XPATH).click()
        print(browser.current_url)

        # Log in
        username = "bkcollection"
        username_id = "sic_login_header_username"
        password = "123456"
        password_id = "sic_login_header_password"
        waiter.find_write(browser, username_id, username, by=ID)
        waiter.find_write(browser, password_id, password, by=ID, send_enter=True)

        # Wait for login process to finish by locating an element only found
        # after logging in, like the Logged In Nav
        nav_id = 'sic_loggedInNav'
        waiter.find_element(browser, nav_id, ID)
        print("log in done")

        # Load the target page
        target_url = ("https://www.shareinvestor.com/prices/price_download.html#/?"
                      "type=price_download_all_stocks_bursa")
        browser.get(target_url)
        print(browser.current_url)

        # Click download button
        all_data_xpath = ("//*[@href='/prices/price_download_zip_file.zip?"
                          "type=history_all&market=bursa']")
        waiter.find_element(browser, all_data_xpath, XPATH).click()

        # This is a bit challenging: You need to wait until the download is complete
        # This file is 220 MB, it takes a while to complete. This method waits until
        # there is at least one file in the dir, then waits until there are no
        # filenames that end in `.part`
        # Note that this is problematic if there is already a file in the target dir. I
        # suggest looking into using the tempfile module to create a unique, temporary
        # directory for downloading every time you run your script
        print("Waiting for download to complete")
        at_least_1 = lambda x: len(x("{0}/*.zip*".format(DOWNLOAD_DIR))) > 0
        WebDriverWait(glob.glob, 300).until(at_least_1)

        no_parts = lambda x: len(x("{0}/*.part".format(DOWNLOAD_DIR))) == 0
        WebDriverWait(glob.glob, 300).until(no_parts)
        print("Download Done")

        # Now do whatever it is you need to do with the zip file
        # zip_ref = zipfile.ZipFile(DOWNLOAD_DIR, 'r')
        # zip_ref.extractall(DOWNLOAD_DIR)
        # zip_ref.close()
        # os.remove(zip_ref)

    print("Done!")

if __name__ == "__main__":
    main()
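As the comment in the script suggests, a unique download directory per run avoids stale files confusing the glob-based completion checks; a sketch with the standard tempfile module:
import tempfile

# a fresh, empty directory every run, e.g. /tmp/shKLSE-a1b2c3
DOWNLOAD_DIR = tempfile.mkdtemp(prefix="shKLSE-")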
Full disclosure: I maintain the explicit module. It is designed to make using explicit waits much easier, for exactly this kind of situation, where websites slowly load dynamic content based on user interactions. You could replace all of the waiter.XXX calls above with direct explicit waits.
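For example, the login click above is roughly equivalent to this plain-Selenium explicit wait (a sketch, with an arbitrary 30 s timeout):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# poll up to 30 s for the login link, then click it
WebDriverWait(browser, 30).until(
    EC.presence_of_element_located((By.XPATH, login_xpath))
).click()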
Take it outside the scope of Selenium. Change the preference settings so that when the link is clicked (first check that the link is valid) it gives you a pop-up asking to save, then use Sikuli (http://www.sikuli.org/) to click on the pop-up.
MIME types do not always work, and there is no black-and-white answer as to why they don't.
The reason is that the webpage loads slowly. I added a wait of 20 seconds after opening the webpage link:
login_attempt.submit()
browser.get("https://www.shareinvestor.com/prices/price_download.html#/?type=price_download_all_stocks_bursa")
print browser.current_url
time.sleep(20)
dl = browser.find_element_by_xpath("//*[@href='/prices/price_download_zip_file.zip?type=history_all&market=bursa']").click()
It returns no error.
Additionally, /zip is an incorrect MIME type. Change it to profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'application/zip').
The final correction:
#!/usr/bin/python
print "hello from python 2"
import urllib2
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from pyvirtualdisplay import Display
import requests, zipfile, os
display = Display(visible=0, size=(800, 600))
display.start()
profile = webdriver.FirefoxProfile()
profile.set_preference('browser.download.folderList', 2)
profile.set_preference('browser.download.manager.showWhenStarting', False)
profile.set_preference('browser.download.dir', "/home/vinvin/shKLSE/")
# application/zip not /zip
profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'application/zip')
for retry in range(5):
    try:
        browser = webdriver.Firefox(profile)
        print "firefox"
        break
    except:
        time.sleep(3)
time.sleep(1)
browser.get("https://www.shareinvestor.com/my")
time.sleep(10)
login_main = browser.find_element_by_xpath("//*[@href='/user/login.html']").click()
print browser.current_url
username = browser.find_element_by_id("sic_login_header_username")
password = browser.find_element_by_id("sic_login_header_password")
print "find id done"
username.send_keys("bkcollection")
password.send_keys("123456")
print "log in done"
login_attempt = browser.find_element_by_xpath("//*[@type='submit']")
login_attempt.submit()
browser.get("https://www.shareinvestor.com/prices/price_download.html#/?type=price_download_all_stocks_bursa")
print browser.current_url
time.sleep(20)
dl = browser.find_element_by_xpath("//*[@href='/prices/price_download_zip_file.zip?type=history_all&market=bursa']").click()
time.sleep(30)
browser.close()
browser.quit()
display.stop()
zip_ref = zipfile.ZipFile('/home/vinvin/shKLSE/file.zip', 'r')
zip_ref.extractall('/home/vinvin/shKLSE')
zip_ref.close()
# remove with correct path
os.remove('/home/vinvin/shKLSE/file.zip')
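A slightly tidier version of the extraction step uses zipfile as a context manager, so the archive is closed even if extractall() raises (the file name here is assumed to match the download above):
import os, zipfile

zip_path = '/home/vinvin/shKLSE/file.zip'
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
    zip_ref.extractall('/home/vinvin/shKLSE')
os.remove(zip_path)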
I haven't tried the site you mentioned; however, the following code works perfectly and downloads the ZIP. If you are not able to download the zip, the MIME type could be different. You can use the Chrome browser and network inspection to check the MIME type of the file you are trying to download.
profile = webdriver.FirefoxProfile()
profile.set_preference('browser.download.folderList', 2)
profile.set_preference('browser.download.manager.showWhenStarting', False)
profile.set_preference('browser.download.dir', "/home/vinvin/shKLSE/")
profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'application/zip')
browser = webdriver.Firefox(profile)
browser.get("http://www.colorado.edu/conflict/peace/download/peace.zip")
The code I've written successfully does its part in crawling through a website with the appropriate date and time formatted in the URL, grabbing the table from the underlying HTML source code, and appending the results to a cache.
This python file gets run several dozen times (there are many agent IDs whose info I need to grab); after the script runs, however, dozens of chrome.exe and chromedriver.exe instances still appear in the computer's memory (this is visible in the computer's "Resource Monitor.")
Below is my code. I've used driver.quit() as well as driver.close() and even both together (with driver.close() coming first).
Isn't the driver.quit() supposed to close the instances in the computer's system? Why are they appearing in the memory? Is there a solution to this issue?
Please let me know if I can provide any further information. Thank you in advance.
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from datetime import datetime, timedelta
import credentials_page
def get_updated_url(agent_id_num):
    now = datetime.utcnow()
    today = datetime(now.year, now.month, now.day)
    yesterday = today - timedelta(days=1)
    previous_date_string = str(yesterday)[:10]
    return 'https://examplewebsite.com/agentId/'+agent_id_num+'/orderBy/ASC/startDate/'+previous_date_string+'%204:00%20AM/endDate/'+previous_date_string+'%2010:30%20PM'

def login_entry(username, password, browser): # logs into website
    login_email = browser.find_element_by_id('UserName')
    login_email.send_keys(username)
    login_password = browser.find_element_by_id('Password')
    login_password.send_keys(password)
    submit_elem = browser.find_element_by_xpath("//button[contains(text(), 'Log in')]")
    submit_elem.click()

def get_element(xpath, browser): # grabs element, turns it into needed table in raw HTML format
    table_of_interest = browser.find_element_by_xpath(xpath)
    # this has a type of <class 'selenium.webdriver.remote.webelement.WebElement'>
    return str('<table>'+table_of_interest.get_attribute('innerHTML')+'</table>')

def record_source_code(destination_cache, get_element_html): # takes element HTML and writes it to cache
    code_destination = open(destination_cache, 'w')
    code_destination.write(repr(get_element_html))
    code_destination.close()

def main_function(agent_id):
    driver = webdriver.Chrome()
    # figure out strings for start_date, end_date
    url = get_updated_url(agent_id)
    driver.get(url)
    #login
    login_entry(credentials_page.website_username, credentials_page.website_password, driver)
    # first test to see if "not found"
    if len(driver.find_elements_by_xpath("//*[text()='Not Found']"))>0:
        logoff_elem = driver.find_element_by_xpath("//*[contains(text(), 'Log off')]")
        logoff_elem.click()
        driver.quit()
        return False
    else:
        #grab table needed
        WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.XPATH,'/html/body/div/div/div[2]/div[2]/table/tbody')))
        table_html = get_element('/html/body/div/div/div[2]/div[2]/table/tbody', driver)
        driver.quit()
        record_source_code('results_cache.html', table_html)
        return True
I think the root cause is that your code doesn't handle exceptions, so when an exception occurs, the driver won't quit. A try/except should help.
def main_function(agent_id):
    driver = webdriver.Chrome()
    # figure out strings for start_date, end_date
    url = get_updated_url(agent_id)
    try:
        driver.get(url)
        #login
        login_entry(credentials_page.website_username, credentials_page.website_password, driver)
        # first test to see if "not found"
        if len(driver.find_elements_by_xpath("//*[text()='Not Found']"))>0:
            logoff_elem = driver.find_element_by_xpath("//*[contains(text(), 'Log off')]")
            logoff_elem.click()
            driver.quit()
            return False
        else:
            #grab table needed
            WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.XPATH,'/html/body/div/div/div[2]/div[2]/table/tbody')))
            table_html = get_element('/html/body/div/div/div[2]/div[2]/table/tbody', driver)
            driver.quit()
            record_source_code('results_cache.html', table_html)
            return True
    except:
        driver.quit() #<-- Try/except and close it
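A try/finally variant guarantees the quit on every path, including the successful returns, without repeating driver.quit(); a sketch of the same function:
def main_function(agent_id):
    driver = webdriver.Chrome()
    try:
        driver.get(get_updated_url(agent_id))
        login_entry(credentials_page.website_username,
                    credentials_page.website_password, driver)
        if driver.find_elements_by_xpath("//*[text()='Not Found']"):
            driver.find_element_by_xpath("//*[contains(text(), 'Log off')]").click()
            return False
        WebDriverWait(driver, 60).until(EC.presence_of_element_located(
            (By.XPATH, '/html/body/div/div/div[2]/div[2]/table/tbody')))
        table_html = get_element('/html/body/div/div/div[2]/div[2]/table/tbody', driver)
        record_source_code('results_cache.html', table_html)
        return True
    finally:
        driver.quit()  # runs on success, failure, and early return alike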
Hi, I am trying to make a script that checks the URL of the current web page until the URL changes. I have already done it, and it works great, but after some time I get an error (a picture of the error message was attached).
Any help would be appreciated :-)
P.S. I am new to Python, so please be kind :D
The script:
import easygui
import os
import time
from selenium import webdriver
driver = webdriver.Chrome("PATH TO CHROMEDRIVER.EXE")
driver.get("URL OF SOME WEBSITE")
time.sleep(10)
b = "URL OF SOME WEBSITE"
a = driver.current_url
while a == b:
    a = driver.current_url
else:
    easygui.msgbox ("URL changed")
I would put a sleep in your while loop. If you don't, then you're sending commands to the browser as fast as your machine can handle (which isn't necessary) and that can cause issues.
while a == b:
    a = driver.current_url
    time.sleep(0.25)
else:
    easygui.msgbox ("URL changed")
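Alternatively, Selenium's own wait can do the polling and time out cleanly; a sketch using the url_changes expected condition:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# poll for up to 300 s until the current URL differs from b
WebDriverWait(driver, 300).until(EC.url_changes(b))
easygui.msgbox("URL changed")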