Loop is ending for clicking elements within list - python

My problem is that
1st part:
I have to fetch all the href values dynamically
2nd part:
After fetching I need to click on href value which are required(ex:i mean if dynamically I fetch 20 href value in that only 8 I need to click )
And catch is that the href value keeps on changing (1st part I know how to do but 2 part I am not sure how to implement)
hamburgerDrop = driver.find_element(By.XPATH,"//tr[#data-category-id='15']/td[3]")
elements = hamburgerDrop.find_elements(By.TAG_NAME,"a")
for link in elements:
if link.get_attribute('href'):
link.click()
print("the element found")
driver.back()
time.sleep(4)
break
else:
print("element is not found")
From the above code which I have written I am able to click only one href value if I want to click on the second I cant do it
for example:( thing is out of 20 href value i want to click only 8 )
So i wrote a code like this
def find_products(self):
elements = self.captureLinks(self.category_container,"xpath","a,li,div")
for link in elements:
if link.get_attribute('href') = 'https://example.com,/c/announcements' :
link.click()
self.log.info("The products link is found")
time.sleep(3)
else:
self.log.info("The products link is not found")
self.back()
time.sleep(5)
when my manager reviewed this code ,they told me with out comparing with href value it should click on the respective link .
def find_products(self):
elements = self.captureLinks(self.category_container,"xpath","a,li,div")
for link in elements:
if "announcements" in link.get_attribute('href') :
link.click()
self.log.info("The products link is found")
time.sleep(3)
else:
self.log.info("The products link is not found")
self.back()
time.sleep(5)
return elements
so i changed the code like this but when i executed i am getting the error
TypeError: argument of type 'NoneType' is not iterable

The problem is that your loop is being ended with break. Remove this and the loop should continue

Related

Cannot locate child element Selenium

Trying to pick out single restaurant elements under the All Restaurants category on this page. https://www.foodpanda.sg/restaurants/new?lat=1.2915902&lng=103.8379066&vertical=restaurants
All li elements have a different class name. The only working xpath, I have been able to figure out is this one.
//ul[#class="vendor-list"]//li[3]
I cannot figure out how to increase this number and get all the restaurants, which is complicated by the fact that there is an infinite scroll as well.
j = 1
def get_rest():
global j
while True:
driver.execute_script("window.scrollBy(0,2525)", "")
time.sleep(5)
var = f'{[j]}'
elems = driver.find_elements_by_xpath(f'//ul[#class="vendor-list"]//li{var})
return elems
I guess it should be something like this
all_restaurants = set()
restaurant_locator = '//ul[#class="vendor-list"]//li[#data-testid and not(#class)]'
page_bottom_locator = '.restaurants__city-bottom-info'
while #bottom is not reached:
restaurants = driver.find_elements_by_xpath(restaurant_locator)
all_restaurants.add(restaurants)
time.sleep(1)
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
I tried to get the innerHTML of the //ul[#class="vendor-list"]//li
Code :
driver.get("https://www.foodpanda.sg/restaurants/new?lat=1.2915902&lng=103.8379066&vertical=restaurants")
while True:
for item in driver.find_elements(By.XPATH, "//ul[#class='vendor-list']/li"):
ActionChains(driver).move_to_element(item).perform()
sleep(0.1)
print(item.get_attribute('innerHTML'))

How to check if a number is in a web element with selenium

I am trying to trigger an action when the number 1 appears on a web element, but how do I check for it?
This is what I am trying to do, and I get the error 'WebElement' is not iterable
def is_0(self):
nav = Chrome()
nav.set_window_size(1360, 760)
while True:
if 1 in nav.find_element_by_xpath('//*[#id="header"]/nav/ul/li[4]/a/span/b'):
break
else:
print('Verificando')
sleep(2)
In order to get a text of an element, you should use ".text" or ".get_attribute('innerHTML')". I used ".get_attribute('innerHTML')" in my code.
Please refer to my code.
def is_0(self):
nav = Chrome()
nav.set_window_size(1360, 760)
while True:
if '1' in nav.find_element_by_xpath('//*[#id="header"]/nav/ul/li[4]/a/span/b').get_attribute('innerHTML'):
break
else:
print('Verificando')
sleep(2)

Selenium goes into infinite loop in Python

I am trying to scrape a website and fairly new to Python. I have managed to come up with below code. The problem however is it goes into an infinite loop after reaching the last page i.e Next button is greyed out.
Also i don't think i am catching the Stale Element properly here. Any help would be greatly appreciated!`
pages_remaining = True
while pages_remaining:
button=driver.find_element_by_class_name("arrow-right")
href_data = button.get_attribute('href')
if href_data is not None:
soup=BeautifulSoup(driver.page_source,"html.parser")
data = soup.find_all("div",{"class":"shelfProductStamp-content row"})
count = 1
for item in data:
ProductText=item.find("a",attrs={"class":"shelfProductStamp-imageLink"})["title"]
if item.find("span",attrs={"class":"sf-pricedisplay"}) is not None:
Price=item.find("span",attrs={"class":"sf-pricedisplay"}).text
else:
Price=""
if item.find("p",attrs={"class":"sf-comparativeText"}) is not None:
SubPrice1=item.find("p",attrs={"class":"sf-comparativeText"}).text
else:
SubPrice1=""
if item.find("span",attrs={"class":"sf-regoption"}) is not None:
Option=item.find("span",attrs={"class":"sf-regoption"}).text
else:
Option=""
SubPrice=str(SubPrice1)+"-"+str(Option)
SaleDates=item.find("div",attrs={"class":"sale-dates"}).text
urll2=driver.current_url
PageNo=driver.find_element_by_class_name("current").text
writer.writerow([ProductText,Price,SubPrice,SaleDates,PageNo])
count+=1
try:
def find(driver):
element = driver.find_element_by_class_name("arrow-right")
if element:
return element
else:
pages_remaining=False
#driver.quit()
time.sleep(10)
driver.implicitly_wait(10)
element = WebDriverWait(driver, 60).until(find)
driver.execute_script("arguments[0].click();", element)
except StaleElementReferenceException:
pass
else:
break
Thanks
When you set pages_remaining = False inside the find() function, that is a local variable. It is not the same variable as pages_remaining in the outer loop.
If you want to do it that way, you'll need to make it a global.
Thanks for your help here. I managed to fix this by simply adding another if statement at the end and swapping the time.sleep(10) as below
try:
def find(driver):
element = driver.find_element_by_class_name("arrow-right")
if element:
return element
driver.implicitly_wait(10)
element = WebDriverWait(driver, 60).until(find)
driver.execute_script("arguments[0].click();", element)
time.sleep(10)
except StaleElementReferenceException:
pass
if href_data is None:
break

Loop over list of elements for find_element_by_xpath() by Selenium and Webdriver

With Python, Selenium and Webdriver, a need to subsequently click elements found by texts, using the find_element_by_xpath() way on a webpage.
(an company internal webpage so excuse me cannot provide the url)
By xpath is the best way but there are multiple texts I want to locate and click.
It works when separately like:
driver.find_element_by_xpath("//*[contains(text(), 'Kate')]").click()
For multiple, here is what I tried:
name_list = ["Kate", "David"]
for name in name_list:
xpath = "//*[contains(text(), '"
xpath += str(name)
xpath += "')]"
print xpath
driver.find_element_by_xpath(xpath).click()
time.sleep(5)
The output of the print xpath looked ok however selenium says:
common.exceptions.NoSuchElementException
You can simplify your code as below:
for name in name_list:
driver.find_element_by_xpath("//*[contains(text(), '%s')]" % name).click()
or
for name in name_list:
try:
driver.find_element_by_xpath("//*[contains(text(), '{}')]".format(name)).click()
except:
print("Element with name '%s' is not found" % name)
Use string formatting. Put a placeholder into the xpath string and fill it with a variable value:
name_list = ["Kate", "David"]
for name in name_list:
xpath = "//*[contains(text(),'{}')]".format(name)
driver.find_element_by_xpath(xpath).click()
Try this:
name_list = ["Kate", "David"]
for name in name_list:
xpath = "//*[contains(text(), '" + str(name) + "')]" # simplified
print xpath
list = driver.find_elements_by_xpath(xpath) # locate all elements by xpath
if len(list) > 0: # if list is not empty, click on element
list[0].click() # click on the first element in the list
time.sleep(5)
This will prevent from throwing
common.exceptions.NoSuchElementException
Note: also make sure, that you using the correct xPath.

When using ActionChains, is it possible to click on an element, perform some actions, go back to the previous page and continue to the next item?

I'm using ActionChains in a loop and I'm trying to figure out how to click on an item and perform some actions if certain conditions are met, in my case if the item isn't sold out and go back to the previous page to continue the loop. Is it possible to do so or should I be using a different method? The code I have below works fine until I click on the available item and go to a different url, which triggers a stale element error.
articles = driver.find_elements_by_tag_name('article')
for article in articles:
ActionChains(driver).move_to_element(article).perform()
if article.find_element_by_tag_name('a').text == "sold out":
print("sold out")
else:
print("available")
name = article.find_element_by_xpath("div/h1/a")
color = article.find_element_by_xpath("div/p/a")
name_text = name.text
color_text = color.text
print name_text, color_text
link = article.find_element_by_xpath('div/a').get_attribute('href')
print(link)
driver.find_element_by_xpath("""//*[#id="add-remove-
buttons"]/input""").click()
(...)
(...)
continue
new set-up although I'm getting a 'list index out of range' error now.
articles = driver.find_elements_by_tag_name('article')
for i in range(len(articles)):
article = driver.find_elements_by_tag_name('article')[i]
if article.find_element_by_tag_name('a').text == "sold out":
print("sold out")
else:
print("available")
name = article.find_element_by_xpath("div/h1/a")
color = article.find_element_by_xpath("div/p/a")
name_text = name.text
color_text = color.text
print name_text, color_text
link = article.find_element_by_xpath('div/a').get_attribute('href')
print(link)
driver.get(link)
(...)
(...)
continue
You get a StaleElementReferenceException whenever you hold a variable to an element on the page and something in the page changes or the page refreshes.
What you should do initiate a new variable inside the loop. In this way, every time you iterate through the loop and the page is (possibly) refreshed, there is a new variable and the exception doesn't occur.
Something like this to start your loop.
# look up the articles one time outside of the loop, so we can check the number of articles
articles = driver.find_elements_by_tag_name('article')
# loop through the articles, based on the number of articles (length of list)
for i in range(len(articles)):
# each time in the for loop, look up the list of articles again and then go for element number i
article = driver.find_elements_by_tag_name('article')[i]

Categories

Resources