Going through a paginated table with Selenium in Python

I am paging through the table at the following link:
http://cancer.sanger.ac.uk/cosmic/sample/overview?id=2120881
with Selenium in Python. This is the code:
driver = webdriver.Chrome()
driver.get('http://cancer.sanger.ac.uk/cosmic/sample/overview?id=2120881')
elem = driver.find_element_by_link_text("Variants")
while elem:
    elem.click()
    time.sleep(5)
    try:
        elem = driver.find_element_by_link_text("Next")
        print(elem.is_enabled())
        if 'disabled' in elem.get_attribute('class'):
            break
    except:
        print('Next not available or page not loaded!')
driver.quit()
I have trouble changing the number of displayed values to 100. How would I do that?
Also, why does is_enabled() return True even when the button becomes unclickable?
Thanks in advance!

Q: I have trouble changing the number of displayed values to 100. How would I do that?
Solution: You have to use the Select class to pick a value from the dropdown.
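For reference, a minimal sketch of the Select API, assuming the DataTables page-length dropdown on that page (the element name is the one used in the updated code below):

from selenium.webdriver.support.ui import Select

# locate the "Show N entries" dropdown of the DataTable and pick 100
length_select = Select(driver.find_element_by_name('DataTables_Table_0_length'))
length_select.select_by_value('100')          # match the option's value attribute
# other selectors the Select class offers:
# length_select.select_by_visible_text('100')
# length_select.select_by_index(3)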
Question: why does is_enabled() return True even when the button becomes unclickable?
is_enabled() returns True only while the button is actually enabled. As you can see in the output below, True is printed just three times, while the button was still enabled; after that the code takes the break branch and exits as expected.
Here is your code updated with Select and a print statement before the break:
from selenium import webdriver
import time
from selenium.webdriver.support.ui import Select

driver = webdriver.Chrome()
driver.maximize_window()
driver.get('http://cancer.sanger.ac.uk/cosmic/sample/overview?id=2120881')
elem = driver.find_element_by_link_text("Variants")
while elem:
    elem.click()
    time.sleep(5)
    select = Select(driver.find_element_by_name('DataTables_Table_0_length'))
    select.select_by_value('100')  # you can use any value
    try:
        elem = driver.find_element_by_link_text("Next")
        print(elem.is_enabled())
        if 'disabled' in elem.get_attribute('class'):
            print("Before Break")
            break
    except:
        print('Next not available or page not loaded!')
driver.quit()
The output is:
True
True
True
Before Break

Related

How to know if an element disappears and then appears in Selenium Python

I want to know if an element appears and then disappears, and I want to apply that in this code.
I can tell that the internet is gone or back by a text element that appears when the connection drops.
except:
    time.sleep(3)
    if driver.find_element(By.CLASS_NAME, "KhLQZTRq").size['width'] != 0:
        print('internet lost')
        while True:
            time.sleep(5)
            if driver.find_element(By.CLASS_NAME, "KhLQZTRq").size['width'] == 0:
                print("internet back")
            else:
                continue
    else:
        print("nothing happen")
This is working fine:
if driver.find_element(By.CLASS_NAME, "KhLQZTRq").size['width'] != 0
and it prints 'internet lost' when the element appears.
But this is not working:
if driver.find_element(By.CLASS_NAME, "KhLQZTRq").size['width'] == 0
I just want it to print "internet back" once the element is no longer present.
To check whether the element has appeared or disappeared, test for its presence, for example:
if driver.find_elements(By.CLASS_NAME, "KhLQZTRq"):
find_elements returns a list, so this is truthy when the element exists and falsy (an empty list) when it doesn't. Note that find_element, by contrast, raises NoSuchElementException when the element is missing rather than returning False.
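A minimal sketch of waiting for the banner to appear and then disappear using explicit waits; the class name is the one from the question, and the 60-second timeout is an assumption:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
wait = WebDriverWait(driver, 60)  # timeout is an assumption, adjust as needed

# block until the element is present -> the connection was lost
wait.until(EC.presence_of_element_located((By.CLASS_NAME, "KhLQZTRq")))
print('internet lost')

# block until the element is gone or hidden again -> the connection is back
wait.until(EC.invisibility_of_element_located((By.CLASS_NAME, "KhLQZTRq")))
print("internet back")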

Unclickable button in selenium

from selenium import webdriver
from time import sleep
from selenium.webdriver.common.by import By

chrome_options = webdriver.ChromeOptions()
driver = webdriver.Chrome(r'chromedriver.exe', options=chrome_options)
url = 'https://rarible.com/connect'
b = 1

def main():
    driver.get(url)
    input('Login..(Press ENTER when finsihed)')
    sleep(1)
    while driver.current_url == "https://rarible.com/":
        ts_href = '/html/body/div[1]/div[2]/div[2]/div[2]/div/div/div/div[4]/div[2]/div/div[1]/div[1]/div/div/div[' \
                  '1]/div[1]/div/div[3]/a'
        href = driver.find_element(By.XPATH, ts_href).get_attribute('href')
        driver.get(href)
        sleep(2)
        followers = '/html/body/div/div[2]/div[2]/div[2]/div[2]/div/div/div/div[1]/div[5]/div/button[1]/span[2]'
        driver.find_element(By.XPATH, followers).click()
        sleep(3)
        # buttons = driver.find_elements(By.XPATH, '//button[normalize-space()="Follow"]')

        def butts():
            global b
            fbtn = f'/html/body/div/div[1]/div/div[2]/div/div[2]/div/div/div/div[{b}]/div/div/div[3]/button'
            buttons = driver.find_element(By.XPATH, fbtn)
            print(f'BUTTON {b} TEXT (FOLLOW/UNFOLLOW): {buttons.text}')
            if buttons.text == "Follow":
                buttons.click()
                b += 1
                sleep(1)
                butts()
            elif buttons.text == "Unfollow":
                b += 1
                butts()

        butts()
        print('All set here, onto the next one...')
    else:
        driver.get('https://rarible.com/')

if __name__ == '__main__':
    main()
I cannot get it to click the follow buttons.
I can't find an iframe that they are hiding in, or any other kind of JavaScript voodoo being done, but I am not the most experienced, which is why I come to you seeking guidance.
The line where you define followers wasn't working for me, so I changed it to
followers = '//button[@datamarker="root/appPage/address/profile/followingLinks/followers"]'
I'm still trying to figure out how to get the butts() loop to run, but I don't have any followers on Rarible so it's kind of hard to test. However, putting these lines at the end of the if buttons.text == "Follow": block, before butts() is called again, might work:
from selenium.webdriver.common.keys import Keys
scroll_div = driver.find_element(By.XPATH, '/html/body/div/div[1]/div/div[2]/div/div[2]/div/div/div')
scroll_div.send_keys(Keys.ARROW_DOWN)
It's not actually an iframe. The solution has something to do with scrolling through the follow buttons.
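An alternative sketch of the scrolling idea: scroll each follow button into view with JavaScript before clicking it. The normalize-space XPath is the one commented out in the question, so treat it as an assumption about the page:

from time import sleep
from selenium.webdriver.common.by import By

buttons = driver.find_elements(By.XPATH, '//button[normalize-space()="Follow"]')
for button in buttons:
    # bring the button into the visible part of the scrollable follower list
    driver.execute_script("arguments[0].scrollIntoView({block: 'center'});", button)
    button.click()
    sleep(1)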

How to check if a number is in a web element with selenium

I am trying to trigger an action when the number 1 appears on a web element, but how do I check for it?
This is what I am trying to do, and I get the error 'WebElement' is not iterable
def is_0(self):
    nav = Chrome()
    nav.set_window_size(1360, 760)
    while True:
        if 1 in nav.find_element_by_xpath('//*[@id="header"]/nav/ul/li[4]/a/span/b'):
            break
        else:
            print('Verificando')
            sleep(2)
In order to get the text of an element, you should use .text or .get_attribute('innerHTML'). I used .get_attribute('innerHTML') in my code.
Please refer to my code.
def is_0(self):
    nav = Chrome()
    nav.set_window_size(1360, 760)
    while True:
        if '1' in nav.find_element_by_xpath('//*[@id="header"]/nav/ul/li[4]/a/span/b').get_attribute('innerHTML'):
            break
        else:
            print('Verificando')
            sleep(2)
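As an alternative to polling in a loop, here is a sketch using Selenium's built-in expected condition, reusing the nav driver from the code above; the XPath is the one from the question and the 60-second timeout is an assumption:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# blocks until the element's text contains '1', then the action can be triggered
WebDriverWait(nav, 60).until(
    EC.text_to_be_present_in_element(
        (By.XPATH, '//*[@id="header"]/nav/ul/li[4]/a/span/b'), '1'))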

Selenium goes into infinite loop in Python

I am trying to scrape a website and am fairly new to Python. I have managed to come up with the code below. The problem, however, is that it goes into an infinite loop after reaching the last page, i.e. when the Next button is greyed out.
Also, I don't think I am catching the stale element properly here. Any help would be greatly appreciated!
pages_remaining = True
while pages_remaining:
    button = driver.find_element_by_class_name("arrow-right")
    href_data = button.get_attribute('href')
    if href_data is not None:
        soup = BeautifulSoup(driver.page_source, "html.parser")
        data = soup.find_all("div", {"class": "shelfProductStamp-content row"})
        count = 1
        for item in data:
            ProductText = item.find("a", attrs={"class": "shelfProductStamp-imageLink"})["title"]
            if item.find("span", attrs={"class": "sf-pricedisplay"}) is not None:
                Price = item.find("span", attrs={"class": "sf-pricedisplay"}).text
            else:
                Price = ""
            if item.find("p", attrs={"class": "sf-comparativeText"}) is not None:
                SubPrice1 = item.find("p", attrs={"class": "sf-comparativeText"}).text
            else:
                SubPrice1 = ""
            if item.find("span", attrs={"class": "sf-regoption"}) is not None:
                Option = item.find("span", attrs={"class": "sf-regoption"}).text
            else:
                Option = ""
            SubPrice = str(SubPrice1) + "-" + str(Option)
            SaleDates = item.find("div", attrs={"class": "sale-dates"}).text
            urll2 = driver.current_url
            PageNo = driver.find_element_by_class_name("current").text
            writer.writerow([ProductText, Price, SubPrice, SaleDates, PageNo])
            count += 1
        try:
            def find(driver):
                element = driver.find_element_by_class_name("arrow-right")
                if element:
                    return element
                else:
                    pages_remaining = False
                    # driver.quit()
            time.sleep(10)
            driver.implicitly_wait(10)
            element = WebDriverWait(driver, 60).until(find)
            driver.execute_script("arguments[0].click();", element)
        except StaleElementReferenceException:
            pass
    else:
        break
Thanks
When you set pages_remaining = False inside the find() function, that is a local variable. It is not the same variable as pages_remaining in the outer loop.
If you want to do it that way, you'll need to make it a global.
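A minimal sketch of what making it a global looks like here, assuming pages_remaining is defined at module level as in the question:

pages_remaining = True

def find(driver):
    global pages_remaining  # rebind the module-level flag instead of creating a local
    element = driver.find_element_by_class_name("arrow-right")
    if element:
        return element
    pages_remaining = False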
Thanks for your help here. I managed to fix this by simply adding another if statement at the end and moving the time.sleep(10), as below:
try:
    def find(driver):
        element = driver.find_element_by_class_name("arrow-right")
        if element:
            return element
    driver.implicitly_wait(10)
    element = WebDriverWait(driver, 60).until(find)
    driver.execute_script("arguments[0].click();", element)
    time.sleep(10)
except StaleElementReferenceException:
    pass
if href_data is None:
    break

Loop is ending for clicking elements within list

My problem has two parts.
1st part: I have to fetch all the href values dynamically.
2nd part: After fetching, I need to click only the href values that are required (for example, if I dynamically fetch 20 href values, I only need to click 8 of them).
The catch is that the href values keep changing. (I know how to do the 1st part, but I am not sure how to implement the 2nd part.)
hamburgerDrop = driver.find_element(By.XPATH, "//tr[@data-category-id='15']/td[3]")
elements = hamburgerDrop.find_elements(By.TAG_NAME, "a")
for link in elements:
    if link.get_attribute('href'):
        link.click()
        print("the element found")
        driver.back()
        time.sleep(4)
        break
    else:
        print("element is not found")
With the above code I am able to click only one href value; if I want to click on the second one, I can't do it.
For example: out of 20 href values I want to click only 8.
So I wrote code like this:
def find_products(self):
    elements = self.captureLinks(self.category_container, "xpath", "a,li,div")
    for link in elements:
        if link.get_attribute('href') == 'https://example.com,/c/announcements':
            link.click()
            self.log.info("The products link is found")
            time.sleep(3)
        else:
            self.log.info("The products link is not found")
    self.back()
    time.sleep(5)
When my manager reviewed this code, they told me that it should click on the respective link without comparing against the full href value.
def find_products(self):
    elements = self.captureLinks(self.category_container, "xpath", "a,li,div")
    for link in elements:
        if "announcements" in link.get_attribute('href'):
            link.click()
            self.log.info("The products link is found")
            time.sleep(3)
        else:
            self.log.info("The products link is not found")
    self.back()
    time.sleep(5)
    return elements
So I changed the code like this, but when I executed it I got the error:
TypeError: argument of type 'NoneType' is not iterable
The problem is that your loop is being ended with break. Remove it and the loop should continue.
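A hedged sketch of clicking several of the fetched links, assuming the table XPath from the first snippet and "announcements" as an example filter. The None check avoids the TypeError (get_attribute('href') can return None), and the links are re-located after each driver.back() because the old references go stale:

import time
from selenium.webdriver.common.by import By

container_xpath = "//tr[@data-category-id='15']/td[3]"
link_count = len(driver.find_element(By.XPATH, container_xpath)
                       .find_elements(By.TAG_NAME, "a"))

for i in range(link_count):
    # re-locate the links on every pass, since click() + back() invalidates them
    links = driver.find_element(By.XPATH, container_xpath) \
                  .find_elements(By.TAG_NAME, "a")
    href = links[i].get_attribute('href')
    if href and "announcements" in href:   # skip links that have no href at all
        links[i].click()
        time.sleep(3)
        driver.back()
        time.sleep(2)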
