Scroll in Selenium Webdriver (Python) - python

Prerequisites.
You need an account at Instagram to use this script.
Setup a test environment:
Log in, open the needed list(works correctly):
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
# Start a browser. NOTE(review): service_args with --ignore-ssl-errors /
# --ssl-protocol is a PhantomJS-era option set; confirm Chrome accepts it
# in the Selenium version used here.
driver = webdriver.Chrome(
# driver = webdriver.Firefox(
# driver = webdriver.PhantomJS(
service_args=['--ignore-ssl-errors=true', '--ssl-protocol=any'])
# Open the login page and fill in the credentials (placeholders below).
driver.get("https://instagram.com/accounts/login")
username = driver.find_element_by_name("username")
password = driver.find_element_by_name("password")
username1 = 'instagram' # change it!
password1 = 'instagrampassword1' # change it!
username.send_keys(username1)
password.send_keys(password1)
# Submit the login form via its (brittle) generated CSS path.
submit_button = driver.find_element_by_css_selector(
'#react-root > div > article > div > div:nth-child(1) > div > form > span > button')
submit_button.click()
sleep(2)
# Open the target profile and click the element that opens the list dialog.
link = 'https://www.instagram.com/youtube/'
driver.get(link)
driver.implicitly_wait(2)
driver.find_elements_by_class_name("_218yx")[2].click()
Wrong scroll.
How to fix this block?
How to focus and scroll correctly on this page?
My attempts:
# Attempts to give keyboard focus to the scrollable list so END key
# presses scroll it. The pasted snippet lost the loop indentation
# (IndentationError); restored here.
driver.find_element_by_class_name("_cx1ua").send_keys(Keys.NULL) # focus
# The element has been deleted entirely or
# the element is no longer attached to the DOM.
driver.find_element_by_class_name("_q44m8").send_keys(Keys.NULL)
# cannot focus element
driver.find_element_by_class_name("_qjr85").send_keys(Keys.NULL)
# cannot focus element
for i in range(5):
    # END scrolls the focused element to its bottom.
    driver.find_element_by_class_name("_cx1ua").send_keys(Keys.END)
=============================================================
to #Moshisho :
We need to focus on some element to activate it.
The question is which element we need to choose to focus, and how?
This is not a "body":
something like that, but not this:
# Focus the page background and send SPACE repeatedly to scroll.
# The pasted snippet lost the loop-body indentation; restored here.
background = driver.find_element_by_css_selector("body")
# background = driver.find_element_by_css_selector("div._2uju6")
for i in range(5):
    background.send_keys(Keys.SPACE)
    time.sleep(1)
Without it, this command does not work.
to #Naveen :
# location_once_scrolled_into_view scrolls the element into view (once)
# and returns its on-screen coordinates as a dict.
print(driver.find_element_by_css_selector("div._a1rcs").location_once_scrolled_into_view) # {'x': 0, 'y': 0}
print(driver.find_element_by_class_name("_cx1ua").location_once_scrolled_into_view) # {'x': 376, 'y': 229}
print(driver.find_element_by_class_name("_q44m8").location_once_scrolled_into_view) # {'x': 376, 'y': 180}
print(driver.find_element_by_class_name("_qjr85").location_once_scrolled_into_view) # {'x': 376, 'y': 180}
And what's next?
driver.execute_script("window.scrollTo(0, 3000);") # scrolls the window, not the inner dialog; reported not to work here

Try the following code:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
from selenium.webdriver.support.ui import Select

# Start a browser. NOTE(review): service_args is a PhantomJS-era option
# set; confirm Chrome accepts it in this Selenium version.
driver = webdriver.Chrome(
    # driver = webdriver.Firefox(
    # driver = webdriver.PhantomJS(
    service_args=['--ignore-ssl-errors=true', '--ssl-protocol=any'])
driver.maximize_window()

# Log in (credentials are placeholders).
driver.get("https://instagram.com/accounts/login")
username = driver.find_element_by_name("username")
password = driver.find_element_by_name("password")
username1 = 'instagramlogin1' # change it!
password1 = 'instagrampassword1' # change it!
username.send_keys(username1)
password.send_keys(password1)
submit_button = driver.find_element_by_css_selector(
    '#react-root > div > article > div > div:nth-child(1) > div > form > span > button')
submit_button.click()
sleep(2)

link = 'https://www.instagram.com/youtube/'
driver.get(link)
driver.implicitly_wait(2)

# XPath attribute tests need '@' — the '#' in the pasted original is
# markdown mangling and makes the expression invalid.
following = driver.find_element_by_xpath("//a[@href='/youtube/following/']/span")
total_following = int(following.text)
print("total no. of users following: ", total_following)
# click on 239 following, displays 10 users
following.click()
loaded_following = driver.find_elements_by_xpath("//ul[@class='_539vh _4j13h']/li")
loaded_till_now = len(loaded_following)
while loaded_till_now < total_following:
    print("following users loaded till now: ", loaded_till_now)
    print(loaded_following[loaded_till_now - 1])
    loaded_following[loaded_till_now - 1].location_once_scrolled_into_view
    # driver.execute_script("arguments[0].focus();", loaded_following[loaded_till_now-1])
    # END triggers the AJAX request that loads ~10 more users at a time.
    driver.find_element_by_tag_name('body').send_keys(Keys.END)
    # Without the sleep a StaleElementReferenceException is thrown while
    # the response arrives and the DOM updates.
    sleep(1)
    loaded_following = driver.find_elements_by_xpath("//ul[@class='_539vh _4j13h']/li")
    loaded_till_now = len(loaded_following)
# All users are loaded.
driver.quit()
Observed that the browser sends an AJAX request to load more users. This action is triggered when you scroll using the mouse or press the Space or End keys.

In order to scroll in the window, you need to execute JavaScript, try this:
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")  # scroll the window to the document bottom via JS
EDIT: in order to focus an element (it needs to be able to get the focus e.g. an anchor, input, button etc...) you also need to use JavaScript executor:
# Only focusable elements (anchor, input, button, ...) can receive focus;
# do it through the JavaScript executor.
elementToFocus = driver.find_element_by_id("yourID")
driver.execute_script("arguments[0].focus();", elementToFocus)

I'm working with a dynamic React app, I need to scroll to the pages bottom to make react render all the data.
For unknown reasons, solutions based on JS execute_script didn't work. However I got send_keys solution working:
# scroll to bottom to load all
# scroll to bottom to load all
WebDriverWait(driver, 5).until(
    EC.presence_of_element_located((By.XPATH, "//body"))
)
attempt_num = 2
while attempt_num > 0:
    try:
        elem = driver.find_element_by_xpath("//body")
        elem.click()  # the click gives <body> keyboard focus
        elem.send_keys(Keys.END)
    except StaleElementReferenceException as e:
        print(e)
    # NOTE(review): the flattened paste left the decrement's indentation
    # ambiguous; placing it at loop level gives exactly two attempts and
    # guarantees termination — confirm against the author's intent.
    attempt_num = attempt_num - 1
The click() on body and the retry for StaleElementReferenceException are crucial. I haven't found a more elegant way than to retry.
See top answer of How to avoid "StaleElementReferenceException" in Selenium?

Related

~FIREFOX~ Selenium Instagram Bot is unable to locate Follow button to follow profiles despite trying By. XPATH, LINK_TEXT, CLASS_NAME etc

I am on VS Code and my Selenium Instagram Bot's intentional design is to read from a list of profiles from a .txt file, visit those profiles, follow and like a specified number of their posts(if they are private, it just follows them) then goes on to the next profile in the list, all the while using different pre-made bot accounts who's usernames are also on a list, so the code may iterate over them once a number of profiles have been engaged with by a single bot.
I am able to iterate over target profiles, but right now I am just having problems with locating elements and having them to be clicked by the bot. I got it to work on 1 profile, after going to the next profile, it simply didn't do anything and seems to can't find the follow button to click again(I can't recreate this, after some changes were made lol, just getting back into Python after briefly touching on it in school). I still haven't even seen the bot like a post too. Although, the XPATHS on the Log In and the Pop Ups seems to work. It's now just not interacting with the profiles.
~
Any insights would be highly appreciated!
Source Code:
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import time, random
from selenium.webdriver.common.keys import Keys
# Firefox profile path. NOTE(review): defined but never passed to Options
# below — presumably intended for options.profile; confirm.
profilePath = (r'C:\Users\****\AppData\Roaming\Mozilla\Firefox\Profiles\75d4lwz2.3rd')
options = Options()
# NOTE(review): 'geckodrivere.exe' looks like a typo for 'geckodriver.exe'
# — confirm the actual filename on disk.
service = Service('geckodrivere.exe')
firefox = webdriver.Firefox(options=options, service=service)
wait = WebDriverWait(firefox, 20)
# Target profile URLs, one per line.
file = open('scrape_archivepages.txt', 'r')
data = file.read()
igUsers = data.split('\n')
file.close()
# Bot account usernames, one per line.
file2 = open('botlist.txt', 'r')
data2 = file2.read()
bots = data2.split('\n')
file2.close()
def startLogIn(user_, pass_,):
    """Open instagram.com, accept the cookie banner, and log in.

    user_/pass_: credentials typed into the login form.
    Blocks (polling) until the cookie-accept button is found.
    """
    firefox.get('https://www.instagram.com/')
    # Poll until the cookie banner button exists, then accept it.
    while True:
        try:
            cookiesAccept = firefox.find_element(By.XPATH, '/html/body/div[2]/div/div/div/div[2]/div/div/div[1]/div/div[2]/div/div/div/div/div[2]/div/button[2]')
            time.sleep(4)
            cookiesAccept.click()
            time.sleep(4)
            break
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still interrupts;
            # keeps the original best-effort retry behaviour.
            pass
    # XPath attribute tests need '@' — the '#' in the pasted original is
    # markdown mangling.
    username = firefox.find_element(By.XPATH, '//*[@id="loginForm"]/div/div[1]/div/label/input')
    password = firefox.find_element(By.XPATH, '//*[@id="loginForm"]/div/div[2]/div/label/input')
    username.click()
    username.send_keys(user_)
    time.sleep(random.randint(1, 2))
    password.click()
    password.send_keys(pass_)
    time.sleep(random.randint(1, 2))
    log_in = firefox.find_element(By.XPATH, '//*[@id="loginForm"]/div/div[3]')
    log_in.click()
    time.sleep(5)
    # Dismissal of the "save credentials" / "notifications" pop-ups was
    # left disabled in the original:
    # while True:
    #     try:
    #         credentials = firefox.find_element(By.XPATH, '//button[text()="Not Now"]')
    #         time.sleep(3)
    #         credentials.click()
    #         break
    #     except:
    #         pass
    # while True:
    #     try:
    #         notifications = firefox.find_element(By.XPATH, '//button[text()="Not Now"]')
    #         time.sleep(3)
    #         notifications.click()
    #         break
    #     except:
    #         pass
def interact(igUserLink, n):
    """Visit a profile, follow it, and like up to n of its posts.

    igUserLink: profile URL. n: number of posts to like (0 = none).
    The original paste had `if n == 0:` with only a comment as its body
    (SyntaxError); reconstructed as an explicit no-op branch.
    """
    firefox.get(igUserLink)
    time.sleep(2)
    follow = firefox.find_element(By.CSS_SELECTOR, '#mount_0_0_0I > div > div > div > div.x9f619.x1n2onr6.x1ja2u2z > div > div > div > div.x78zum5.xdt5ytf.x10cihs4.x1t2pt76.x1n2onr6.x1ja2u2z > div.x9f619.xnz67gz.x78zum5.x168nmei.x13lgxp2.x5pf9jr.xo71vjh.x1uhb9sk.x1plvlek.xryxfnj.x1c4vz4f.x2lah0s.x1q0g3np.xqjyukv.x1qjc9v5.x1oa3qoh.x1qughib > div.xh8yej3.x1gryazu.x10o80wk.x14k21rp.x1porb0y.x17snn68.x6osk4m > section > main > div > header > section > div.x6s0dn4.x78zum5.x1q0g3np.xs83m0k.xeuugli.x1n2onr6 > div._ab8w._ab94._ab99._ab9f._ab9k._ab9p._abb3._abcm > div > div._ab8w._ab94._ab99._ab9f._ab9m._ab9o._abb0._abcm > button > div > div')
    time.sleep(2)
    # Header Instagram shows on private profiles.
    # NOTE(review): find_element raises when the element is absent, so
    # `private` is always truthy here; detecting a public profile would
    # need find_elements or a try/except.
    private = firefox.find_element(By.XPATH, '/html/body/div[2]/div/div/div/div[1]/div/div/div/div[1]/section/main/div/div/article/div[1]/div/h2')
    if bool(private):
        print('lol')
        follow.click()
        time.sleep(2)
    ## xpath of header from IG saying profile is private
    if not bool(private):
        print('here')
        follow.click()
        time.sleep(2)
    time.sleep(2)
    c = 0
    numPosts = firefox.find_element(By.XPATH, '/html/body/div[2]/div/div/div/div[1]/div/div/div/div[1]/div[1]/div[2]/section/main/div/header/section/ul/li[1]/div/span/span')
    numPosts = int(numPosts.text)
    if n == 0:
        pass  # do nothing — no posts to like
    elif n <= numPosts:
        media = firefox.find_element(By.XPATH, '/html/body/div[2]/div/div/div/div[1]/div/div/div/div[1]/div[1]/div[2]/section/main/div/div[2]/article/div/div/div[1]/div[1]')
        media.click()
        time.sleep(1)
        like = firefox.find_element(By.NAME, 'Like')
        # Renamed from `next` so the builtin is not shadowed.
        next_btn = firefox.find_element(By.NAME, 'Next')
        while c < n:
            like.click()
            time.sleep(3)
            c = c + 1
            next_btn.click()
def VibeFinderInteract(listOfBots, passw, userLink):
    """Log in as each bot in turn and interact with every target profile.

    listOfBots: bot usernames. passw: shared password. userLink: profile URLs.
    """
    for userbot in listOfBots:
        startLogIn(userbot, passw)
        for u in userLink:
            interact(u, 2)  # follow + like 2 posts per profile
        print('')

VibeFinderInteract(bots, 'samepasswordforallthebots', igUsers)

Web crawler won't complete the loop's form-autofill task with Python

I have a script linked with a csv file that should run a loop to fill in page form text, then submit the form, and go back to the prior page form and fill in form text with the next row of data from the csv file.
Currently the script completes the loop task for the first row of CSV data, submits the form, then goes back to the original page form but doesn't loop again by autofilling the form with the next row of CSV inputs. In short, the loop finishes a single cycle following the page submission and then ends.
What can I do to make the loop continue autofilling for the remainder of the csv rows? Thank you all!
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
import csv
import time
import pandas as pd
# import csv file
# Load the spreadsheet and split it into per-column lists.
table = pd.read_csv(r'...test2.csv')
print(table)
address1 = table['Address2'].tolist()
unit1 = table['Unit2'].tolist()
unittype1 = table['Unit Type'].tolist()
beds1 = table['Beds2'].tolist()
bath1 = table['Baths2'].tolist()
rent1 = table['Rent2'].tolist()
vouchbeds1 = table['Vouchbeds2'].tolist()

# open chrome
# driver = Webdriver.chrome("C:\Python Tools\chromedriver.exe")
s = Service("C:\Python Tools\chromedriver.exe")
driver = webdriver.Chrome(service=s)

# Enter login
driver.get("https://hadashboard.gosection8.com/pages/login/Login.aspx")
driver.implicitly_wait(5)
driver.find_element(By.CSS_SELECTOR, ".form > input:nth-child(3)").send_keys("hiddenlogin")
driver.find_element(By.CSS_SELECTOR, ".form > input:nth-child(6)").send_keys("hiddenpassword")
driver.find_element(By.CSS_SELECTOR, ".m-col-12:nth-child(8)").click()
driver.implicitly_wait(10)

# go to rent reasonableness analysis
driver.find_element(By.CSS_SELECTOR, ".not-now-btn").click()
driver.find_element(By.CSS_SELECTOR, ".clear-fix > div > .rent-btn-row > .primary-button").click()
driver.implicitly_wait(10)

# https://stackoverflow.com/questions/66933061/looping-through-several-columns-and-rows-from-csv-to-fill-a-form
# Loop variables renamed so they no longer shadow the source lists.
for row_address, row_unit, row_unittype, row_beds, row_bath, row_rent, row_vouchbeds in zip(
        address1, unit1, unittype1, beds1, bath1, rent1, vouchbeds1):
    # FIX: locate the form fields on *every* iteration. After submitting
    # and navigating back the old element references are stale, which is
    # why the original (which located them once, before the loop) only
    # completed the first row.
    address = driver.find_element(By.ID, "SubjectPage_AutocompleteAddress")
    unit = driver.find_element(By.ID, 'SubjectPage_AddressLine2_Auto')
    beds = driver.find_element(By.ID, "SubjectPage_BedroomCount")
    baths = driver.find_element(By.ID, "SubjectPage_FullBathCount")
    rent = driver.find_element(By.ID, "SubjectPage_AskingRent")
    vouchbeds = driver.find_element(By.ID, "SubjectPage_VoucherBedroomCount")

    address.send_keys(row_address)
    time.sleep(4)
    unit.send_keys(row_unit)
    driver.implicitly_wait(10)
    beds.send_keys(row_beds)
    driver.implicitly_wait(10)
    baths.send_keys(row_bath)
    driver.implicitly_wait(10)
    driver.find_element(By.CSS_SELECTOR, "#SubjectPage_PropertyType_Fake > select").click()
    dropdown = driver.find_element(By.CSS_SELECTOR, "#SubjectPage_PropertyType_Fake > select")
    dropdown.find_element(By.XPATH, "//option[. = 'Apartment']").click()
    #time.sleep(2)
    rent.send_keys(row_rent)
    driver.implicitly_wait(10)
    driver.find_element(By.ID, "SubjectPage_VoucherBedroomCount").click()
    vouchbeds.send_keys(row_vouchbeds)
    driver.implicitly_wait(10)
    submit = driver.find_element(By.ID, "SubjectPage_AnalyzeBottom").click()
    time.sleep(10)
    driver.find_element(By.CSS_SELECTOR, ".subject-cmn-btns:nth-child(1)").click()
    time.sleep(5)
    # return to page forms for next loop: https://hadashboard.gosection8.com/RentWatch5/RentWatch5.aspx
    driver.get("https://hadashboard.gosection8.com/RentWatch5/RentWatch5.aspx")

Scrolling a particular web element using Selenium python

I'm trying to scrape company's jobs offer from linkedin. I need to scroll a section in the page (with an inner scrollbar). I have been trying this :
1.
# Scroll the results rail; the pasted original lost the loop indentation
# and used a Python 2 print statement.
scroll_active = WebDriverWait(driver, 40).until(EC.presence_of_element_located((By.CSS_SELECTOR, "body > div.application-outlet > div.authentication-outlet > div.job-search-ext > div > div > section.jobs-search__left-rail > div > div > ul")))
scroll_active.location_once_scrolled_into_view
while driver.find_element_by_tag_name('div'):
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    Divs = driver.find_element_by_tag_name('div').text
    if 'End of Results' in Divs:
        print('end')
        break
    else:
        continue
Need to extract 'href'
If any one facing that, I wish this could help, you just have to choose well the element that you want to scroll
# Scroll the inner container itself by setting its scrollTop to its
# scrollHeight via the JS executor (scrolls that element, not the window).
my_xpath = WebDriverWait(driver, 40).until(EC.presence_of_element_located((By.XPATH, "/html/body/div[8]/div[3]/div[3]/div/div/section[1]/div/div")))
driver.execute_script('arguments[0].scrollTop = arguments[0].scrollHeight', my_xpath)
Why do you need to scroll here?
seems like you can get all of the element by command:
# XPath attribute tests need '@' — the '#' in the pasted original is
# markdown mangling and makes the expression invalid.
elements = driver.find_elements(By.XPATH, "//a[@class='result-card__full-card-link']")
and looks like:
import time
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get('https://www.linkedin.com/jobs/search/?f_C=1110%2C12800%2C5115950%2C3165553%2C603115%2C10916%2C8331%2C3297950%2C8238%2C5509188%2C3093%2C2625246%2C1112%2C947572%2C11018069%2C407323&geoId=92000000')
time.sleep(3)

def element_present():
    """Return True once the 'show more' button exists in the DOM."""
    try:
        # '@class' restored — the pasted '#class' is markdown mangling.
        driver.find_element(By.XPATH, "//button[@class='infinite-scroller__show-more-button infinite-scroller__show-more-button--visible']")
    except Exception:
        return False
    return True

# Keep scrolling until the show-more button appears, then collect hrefs.
while not element_present():
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
elements = driver.find_elements(By.XPATH, "//a[@class='result-card__full-card-link']")
hrefs = [el.get_attribute('href') for el in elements]
print(hrefs)
print(len(hrefs))
driver.quit()
I might have missed something, but it seems to work well.

In selenium how to find out the exact number of XPATH links with different ids?

With Python3 and selenium I want to automate the search on a public information site. In this site it is necessary to enter the name of a person, then select the spelling chosen for that name (without or with accents or name variations), access a page with the list of lawsuits found and in this list you can access the page of each case.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.common.keys import Keys
import time
import re
Name that will be searched
name = 'JOSE ROBERTO ARRUDA'  # name submitted to the court search form
Create path, search start link, and empty list to store information
# Geckodriver path, search start URL, and the list that accumulates results.
firefoxPath="/home/abraji/Documentos/Code/geckodriver"
link = 'https://ww2.stj.jus.br/processo/pesquisa/?aplicacao=processos.ea'
processos = []
Call driver and go to first search page
# Start Firefox and open the first search page.
driver = webdriver.Firefox(executable_path=firefoxPath)
driver.get(link)
Position cursor, fill and click
# Click the name field, type the name, and run the extended search.
WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#idParteNome'))).click()
time.sleep(1)
# '@id' restored — the pasted '#id' is markdown mangling and is invalid XPath.
driver.find_element_by_xpath('//*[@id="idParteNome"]').send_keys(name)
time.sleep(6)
WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#idBotaoPesquisarFormularioExtendido'))).click()
Mark all spelling possibilities for searching
# Select every spelling variant, then search over the marked ones.
# (These are CSS selectors, where '#id' is the correct syntax.)
WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#idBotaoMarcarTodos'))).click()
WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#idBotaoPesquisarMarcados'))).click()
time.sleep(1)
Check how many pages of data there are - to be used in "for range"
# Read the "page X of N" text and derive the page count for the range loop.
# '@id' restored — the pasted '#id' is markdown mangling.
capta = driver.find_element_by_xpath('//*[@id="idDivBlocoPaginacaoTopo"]/div/span/span[2]').text
print(capta)
paginas = int(re.search(r'\d+', capta).group(0))
paginas = int(paginas) + 1  # +1 so range(1, paginas) reaches the last page
print(paginas)
Capture routine
# Capture routine: for every result page, open every process link and
# scrape three fields. Indentation restored and '#id' XPaths fixed to '@id'.
for acumula in range(1, paginas):
    # Fill the pagination field with the page number and press enter.
    driver.find_element_by_xpath('//*[@id="idDivBlocoPaginacaoTopo"]/div/span/span[2]/input').send_keys(acumula)
    driver.find_element_by_xpath('//*[@id="idDivBlocoPaginacaoTopo"]/div/span/span[2]/input').send_keys(Keys.RETURN)
    time.sleep(2)
    # Captures the number of processes found on the current page - qt
    qt = driver.find_element_by_xpath('//*[@id="idDivBlocoMensagem"]/div/b').text
    qt = int(qt) + 2  # rows start at div[2], so shift the upper bound
    print(qt)
    # Iterate from found number of processes
    for item in range(2, qt):
        # Find the XPATH of each process link - start at number 2
        vez = '//*[@id="idBlocoInternoLinhasProcesso"]/div[' + str(item) + ']/span[1]/span[1]/span[1]/span[2]/a'
        print(vez)
        # Access the direct link and click
        element = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, vez)))
        element.click()
        # Scrape the three fields, tolerating missing ones.
        try:
            num_unico = driver.find_element_by_xpath('//*[@id="idProcessoDetalhesBloco1"]/div[6]/span[2]/a').text
        except NoSuchElementException:
            num_unico = "sem_numero_unico"
        try:
            nome_proc = driver.find_element_by_xpath('//*[@id="idSpanClasseDescricao"]').text
        except NoSuchElementException:
            nome_proc = "sem_nome_encontrado"
        try:
            data_autu = driver.find_element_by_xpath('//*[@id="idProcessoDetalhesBloco1"]/div[5]/span[2]').text
        except NoSuchElementException:
            data_autu = "sem_data_encontrada"
        # Fills dictionary and list
        dicionario = {"num_unico": num_unico,
                      "nome_proc": nome_proc,
                      "data_autu": data_autu
                      }
        processos.append(dicionario)
        # Return a page to click on next process
        driver.execute_script("window.history.go(-1)")
# Close driver
driver.quit()
In this case I captured the number of link pages (3) and the total number of links (84). So my initial idea was to do the "for" three times and within them split the 84 links
The direct address of each link is in XPATH (//*[#id="idBlocoInternoLinhasProcesso"]/div[41]/span[1]/span[1]/span[1]/span[2]/a) which I replace with the "item" to click
For example, when it arrives at number 42 I have an error because the first page only goes up to 41
My problem is how to go to the second page and then restart only "for" secondary
I think the ideal would be to know the exact number of links on each of the three pages
Anyone have any ideas?
Code below is "Capture routine":
wait = WebDriverWait(driver, 20)
#...
# Loop pages until there is no "next page" button; indentation restored,
# '#class'/'#id' XPaths fixed to '@', and the bare `.until(...)` given its
# missing `wait` receiver.
while True:
    links = wait.until(EC.presence_of_all_elements_located((By.XPATH, "//span[contains(@class,'classSpanNumeroRegistro')]")))
    print("links len", len(links))
    for i in range(1, len(links) + 1):
        # Access the direct link and click
        wait.until(EC.element_to_be_clickable((By.XPATH, f"(//span[contains(@class,'classSpanNumeroRegistro')])[{i}]//a"))).click()
        # Run tests to get data
        try:
            num_unico = driver.find_element_by_xpath('//*[@id="idProcessoDetalhesBloco1"]/div[6]/span[2]/a').text
        except NoSuchElementException:
            num_unico = "sem_numero_unico"
        try:
            nome_proc = driver.find_element_by_xpath('//*[@id="idSpanClasseDescricao"]').text
        except NoSuchElementException:
            nome_proc = "sem_nome_encontrado"
        try:
            data_autu = driver.find_element_by_xpath('//*[@id="idProcessoDetalhesBloco1"]/div[5]/span[2]').text
        except NoSuchElementException:
            data_autu = "sem_data_encontrada"
        # Fills dictionary and list
        dicionario = {"num_unico": num_unico,
                      "nome_proc": nome_proc,
                      "data_autu": data_autu
                      }
        processos.append(dicionario)
        # Return a page to click on next process
        driver.execute_script("window.history.go(-1)")
    # wait.until(EC.presence_of_element_located((By.CLASS_NAME, "classSpanPaginacaoImagensDireita")))
    next_page = driver.find_elements_by_css_selector(".classSpanPaginacaoProximaPagina")
    if len(next_page) == 0:
        break
    next_page[0].click()
You can try run the loop until next button is present on the screen. the logic will look like this,
# Click "next page" only while the button exists; indentation restored.
try:
    next_page = driver.find_element_by_class_name('classSpanPaginacaoProximaPagina')
    if next_page.is_displayed():
        next_page.click()
except NoSuchElementException:
    print('next page does not exists')

python selenium: how to visit a web page many times on the same page

I'm using selenium with python to test my web server. Here is my test code:
# Runs the form test ten times, starting a fresh Chrome per iteration.
# Indentation of the loop body restored from the flattened paste.
i = 0
msg = 'abc'
while i < 10:
    driver = webdriver.Chrome()
    driver.get("http://www.example.com")
    txt = driver.find_element_by_id('input-text')
    txt.clear()
    txt.send_keys(msg)
    btn = driver.find_element_by_id('input-search')
    btn.click()
    driver.quit()
    i += 1
The code works well except only one thing: it executes Chrome, do the test and close it for each time of loop. Obviously it's not necessary. What I need is simply to execute Chrome only one time and do many requests. I've tried as below but it doesn't work:
# Same test, reusing one Chrome instance for all ten iterations.
# Indentation of the loop body restored from the flattened paste.
i = 0
msg = 'abc'
driver = webdriver.Chrome()
while i < 10:
    driver.get("http://www.example.com")
    txt = driver.find_element_by_id('input-text')
    txt.clear()
    txt.send_keys(msg)
    btn = driver.find_element_by_id('input-search')
    btn.click()
    i += 1
driver.quit()
I think it's because in my test, there are two things:
1) fill abc in input-text;
2) click a button, submit the abc and open a new web page.
On the new page, there is also an input-text and a button input-search, so it will fill the abc and click the button on the new page, which is not what I want.
At btn.click(), Selenium doesn't wait for the page to load, because the element is not a hyperlink and doesn't submit a web form, so your script might fail. You should fix it by waiting for some element to determine whether the page has reloaded. See the code below.
# Wait for the input on each load, submit, then wait for an element of the
# result page so the next iteration starts from a fully loaded page.
# Indentation restored from the flattened paste.
try:
    driver.get('http://www.example.com')
    txt = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, "input-text")))
    txt.clear()
    txt.send_keys(msg)
    btn = driver.find_element_by_id('input-search')
    btn.click()
    countryDescriptionElement = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, "country-description")))
    #print(driver.page_source.encode('utf-8'))
except WebDriverException as ex:
    print("Enter: " + msg + ", Error: " + str(ex) + ", Found: " + driver.page_source)

Categories

Resources