First of all, I should mention that I have very little programming experience, and I'm having some trouble with the logic and flow of a general web scraper implemented in Python. I assume I should use callbacks or similar methods to properly control the process of saving pages from a JavaScript e-book reader. My script does work, but not consistently. If someone could advise me on improvements that should be made to this script, that would be great. Thank you.
from seleniumwire import webdriver # selenium-wire's webdriver is needed for driver.requests
from seleniumwire.utils import decode as sdecode
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options # [!]
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import time
import os.path
opts = Options() # [!]
opts.add_experimental_option('w3c', True) # [!]
capabilities = DesiredCapabilities.CHROME.copy()
driver = webdriver.Chrome(chrome_options=opts, desired_capabilities=capabilities)
url = ' here comes url'
driver.get(url)
def get_requests():
l = []
for rx in driver.requests:
endmark = '&scale=2&rotate=0' # tail of an image request URL; its length must be 17
if rx.url[-17:]==endmark:
l.append(rx.url)
return list(set(l))
def savepages(diff):
newpages = 0
for urlitem in diff:
for request in driver.requests:
if request.url==urlitem:
#print(request.url)
ind = urlitem.find('.jp2&id') # ex. 0012.jp2&id
file_path = directory_path + '\\' + file_name + urlitem[ind-4:ind] + '.jpg'
tik = 0
while tik<10: #waiting for the response body data
try:
tik += 1
data = sdecode(request.response.body, request.response.headers.get('Content-Encoding', 'identity'))
except AttributeError: # no data error
time.sleep(2) # wait for 2 sec for the data
continue
#data = data.decode("utf-8",'ignore')
# sometimes I get this error 'UnboundLocalError: local variable 'data' referenced before assignment'
# I assumed that the following condition will help but it doesn't seem to work consistently
if data:
with open(file_path, 'wb') as outfile:
outfile.write(data) # sometimes I get UnboundLocalError
else: print('no data')
# was the file saved or not
if os.path.exists(file_path):
newpages += 1 # smth is wrong with the counting logic, since pages+newpages should be equal to the lenght of li=get_requests(), I get more
else:
time.sleep(.5)
return newpages
count = 0 # a counter, should terminate the main delay loop
pages = 0 # counting all saved pages; book pages or images are equivalent, one turn should open 2 new pages/images/requests
oldli = [] #compare to the new list after each delay cycle
turns = 0 #count how many turns have been made or how many times we clicked on the button Next Page
li = get_requests() # get all unique requests of the images/pages, some requests might be still loading, but we manually opened the first page and visually confirmed that there are at least 1 or 3 images/requests
if li: # the program STARTS HERE, first try, there are some requests because we manually opened the first page
# THE MAIN CYCLE should stop when the delay is too long and we turned all the pages of the book
while 2*turns+1<len(li) or count<15: # should terminate the whole program when there is no more images coming
count = 0 #reset counter
success = False #reset success; new pages downloaded successfully
# the main delay counter
# what happens if diff is [] and no success
while True:
count += 1
if count > 14:
print('Time out after more than 10 seconds.')
break
li = get_requests() # in addition, I assume that all requests counting from page 1 will be kept
# it is possible that li will not have some of the old requests and oldli will be longer
# well, I need to keep all old requests in a separate list and then append to it
diff = list(set(li)-set(oldli)) # find new requests after the delay
if diff: # there are some new
npages = savepages(diff) # saves new images and returns the number of them
print('newpages ',npages, ' len diff ', len(diff)) # should be equal
if npages >= len(diff)-1: # we allow one request without a body with data ??
pages += npages # smth is not ok here, the number of pages sometimes exceeds the length of li
success = True # we call it a success
else:
print('Could not save pages. Newpages ', npages, ' len diff ', len(diff))
for pg in diff:
print(pg) # for debuging purposes
break # in this case you break from the delay cycle
else: time.sleep(2) # if no new requests add 2 sec to the waiting time
if success: # we turn pages in case of successful download, this is bad if we need to catch up
while 2*turns+1 < len(li): # if some of old requests are deleted then the program will stop earlier
# it won't wait for the bodies of requests, there is a problem
driver.find_elements(By.CLASS_NAME, "BRicon.book_right.book_flip_next")[0].click()
turns += 1
time.sleep(3) # I got the impression that this doesn't happen
oldli = li
print('pages ',pages,' length of list ',len(li))
break # we break from the delay cycle since success
time.sleep(2) # the main delay timer;; plus no diff timer = total time
else: print('no requests in the list to process')
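For reference, a minimal sketch of how the body-wait inside savepages() could avoid the UnboundLocalError mentioned in the comments (untested against the reader site): data is initialised before the loop, the loop stops as soon as a body is decoded, and the file is written only once, after the loop.
data = None
tik = 0
while tik < 10:  # waiting for the response body data
    tik += 1
    try:
        data = sdecode(request.response.body,
                       request.response.headers.get('Content-Encoding', 'identity'))
        break  # body arrived, stop retrying
    except AttributeError:  # response or body not there yet
        time.sleep(2)
if data:
    with open(file_path, 'wb') as outfile:
        outfile.write(data)
else:
    print('no data for', request.url)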
I'm following a Selenium tutorial for an Amazon price tracker (Clever Programming on YouTube) and I got stuck at getting the links from Amazon using their technique.
tutorial link: https://www.youtube.com/watch?v=WbJeL_Av2-Q&t=4315s
I realized the problem lay in the fact that I'm only getting one link out of the 17 available after doing the product search. I need to get all the links for every product after doing a search and then use them to get into each product and grab its title, seller and price.
The function get_products_links() should get all the links and store them in a list to be used by the function get_product_info().
def get_products_links(self):
self.driver.get(self.base_url) # Go to amazon.com using BASE_URL
element = self.driver.find_element_by_id('twotabsearchtextbox')
element.send_keys(self.search_term)
element.send_keys(Keys.ENTER)
time.sleep(2) # Wait to load page
self.driver.get(f'{self.driver.current_url}{self.price_filter}')
time.sleep(2) # Wait to load page
result_list = self.driver.find_elements_by_class_name('s-result-list')
links = []
try:
### Trying to get a list of XPath link attributes ###
### Only numbers from 3 to 17 work after doing product search where 'i' is placed in the XPATH ###
i = 3
results = result_list[0].find_elements_by_xpath(
f'//*[@id="search"]/div[1]/div[1]/div/span[3]/div[2]/div[{i}]/div/div/div/div/div/div[1]/div/div[2]/div/span/a')
links = [link.get_attribute('href') for link in results]
return links
except Exception as e:
print("Didn't get any products...")
print(e)
return links
At this point get_products_links() only returns one link, since I just made 'i' a fixed value of 3 to make it work for now.
I was thinking of iterating 'i' somehow so I can save every different path, but I don't know how to implement this.
I've tried performing a for loop and appending the results to a new list, but then the app stops working.
Here is the complete code:
from amazon_config import(
get_web_driver_options,
get_chrome_web_driver,
set_browser_as_incognito,
set_ignore_certificate_error,
NAME,
CURRENCY,
FILTERS,
BASE_URL,
DIRECTORY
)
import time
from selenium.webdriver.common.keys import Keys
class GenerateReport:
def __init__(self):
pass
class AmazonAPI:
def __init__(self, search_term, filters, base_url, currency):
self.base_url = base_url
self.search_term = search_term
options = get_web_driver_options()
set_ignore_certificate_error(options)
set_browser_as_incognito(options)
self.driver = get_chrome_web_driver(options)
self.currency = currency
self.price_filter = f"&rh=p_36%3A{filters['min']}00-{filters['max']}00"
def run(self):
print("Starting script...")
print(f"Looking for {self.search_term} products...")
links = self.get_products_links()
time.sleep(1)
if not links:
print("Stopped script.")
return
print(f"Got {len(links)} links to products...")
print("Getting info about products...")
products = self.get_products_info(links)
# self.driver.quit()
def get_products_info(self, links):
asins = self.get_asins(links)
product = []
for asin in asins:
product = self.get_single_product_info(asin)
def get_single_product_info(self, asin):
print(f"Product ID: {asin} - getting data...")
product_short_url = self.shorten_url(asin)
self.driver.get(f'{product_short_url}?language=en_GB')
time.sleep(2)
title = self.get_title()
seller = self.get_seller()
price = self.get_price()
def get_title(self):
try:
return self.driver.find_element_by_id('productTitle')
except Exception as e:
print(e)
print(f"Can't get title of a product - {self.driver.current_url}")
return None
def get_seller(self):
try:
return self.driver.find_element_by_id('bylineInfo')
except Exception as e:
print(e)
print(f"Can't get title of a product - {self.driver.current_url}")
return None
def get_price(self):
return '$99'
def shorten_url(self, asin):
return self.base_url + 'dp/' + asin
def get_asins(self, links):
return [self.get_asin(link) for link in links]
def get_asin(self, product_link):
return product_link[product_link.find('/dp/') + 4:product_link.find('/ref')]
def get_products_links(self):
self.driver.get(self.base_url) # Go to amazon.com using BASE_URL
element = self.driver.find_element_by_id('twotabsearchtextbox')
element.send_keys(self.search_term)
element.send_keys(Keys.ENTER)
time.sleep(2) # Wait to load page
self.driver.get(f'{self.driver.current_url}{self.price_filter}')
time.sleep(2) # Wait to load page
result_list = self.driver.find_elements_by_class_name('s-result-list')
links = []
try:
### Trying to get a list of XPath link attributes ###
### Only numbers from 3 to 17 work after doing product search where 'i' is placed ###
i = 3
results = result_list[0].find_elements_by_xpath(
f'//*[@id="search"]/div[1]/div[1]/div/span[3]/div[2]/div[{i}]/div/div/div/div/div/div[1]/div/div[2]/div/span/a')
links = [link.get_attribute('href') for link in results]
return links
except Exception as e:
print("Didn't get any products...")
print(e)
return links
if __name__ == '__main__':
print("HEY!!!ππ₯")
amazon = AmazonAPI(NAME, FILTERS, BASE_URL, CURRENCY)
amazon.run()
Steps to Run the script:
Step 1:
install Selenium==3.141.0 into your virtual environment
Step 2:
Search for Chrome Drivers on Google and download the driver that matches your Chrome version. After downloading, extract the driver and paste it into your working folder.
Step 3:
create a file called amazon_config.py and insert the following code:
from selenium import webdriver
DIRECTORY = 'reports'
NAME = 'PS4'
CURRENCY = '$'
MIN_PRICE = '275'
MAX_PRICE = '650'
FILTERS = {
'min': MIN_PRICE,
'max': MAX_PRICE
}
BASE_URL = "https://www.amazon.com/"
def get_chrome_web_driver(options):
return webdriver.Chrome('./chromedriver', chrome_options=options)
def get_web_driver_options():
return webdriver.ChromeOptions()
def set_ignore_certificate_error(options):
options.add_argument('--ignore-certificate-errors')
def set_browser_as_incognito(options):
options.add_argument('--incognito')
If you performed the steps correctly you should be able to run the script and it will perform the following:
Go to www.amazon.com
Search for a product (In this case "PS4")
Get a link for the first product
Visit that product link
Terminal should print:
HEY!!!
Starting script...
Looking for PS4 products...
Got 1 links to products...
Getting info about products...
Product ID: B012CZ41ZA - getting data...
What I'm not able to do is get all the links and iterate over them so the script visits every link on the first page.
If you are able to get all links, the terminal should print:
HEY!!!
Starting script...
Looking for PS4 products...
Got 1 links to products...
Getting info about products...
Product ID: B012CZ41ZA - getting data...
Product ID: XXXXXXXXXX - getting data...
Product ID: XXXXXXXXXX - getting data...
Product ID: XXXXXXXXXX - getting data...
# and so on until all links are visited
I can't run it, so I can only guess how I would do it.
I would put all the try/except inside a for-loop, use links.append() instead of links = [...], and use return after exiting the loop.
# --- before loop ---
links = []
# --- loop ---
for i in range(3, 18):
try:
results = result_list[0].find_elements_by_xpath(
f'//*[@id="search"]/div[1]/div[1]/div/span[3]/div[2]/div[{i}]/div/div/div/div/div/div[1]/div/div[2]/div/span/a')
for link in results:
links.append(link.get_attribute('href'))
except Exception as e:
print(f"Didn't get any products... (i = {i})")
print(e)
# --- after loop ---
return links
But I would also try to use XPath with // to skip most of the divs - and maybe if I skipped div[{i}] then I could get all products without the for-loop, as in the sketch below.
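For example, something like this might work (an untested sketch; the data-component-type attribute is an assumption about Amazon's current search-result markup, so verify it in the page source first):
# One relative XPath instead of the div[{i}] loop (assumed markup, verify first)
results = result_list[0].find_elements_by_xpath(
    '//div[@data-component-type="s-search-result"]//h2//a')
links = [link.get_attribute('href') for link in results]
return links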
BTW:
In get_products_info() I see a similar problem - you create an empty list product = [], but later in the loop you assign a value to product = ..., so you remove the previous value from product. It would need product.append() to keep all values.
Something like
def get_products_info(self, links):
# --- before loop ---
asins = self.get_asins(links)
product = []
# --- loop ---
for asin in asins:
product.append( self.get_single_product_info(asin) )
# --- after loop ---
return product
I have a small script that fetches company data from a website. This website gets regularly updated with new company information. How can I update my CSV with new records on a periodic basis? Also, as you can see in the code, I have used an explicit range for the pages; what other solutions are possible?
The following is the code -
from selenium.webdriver import Firefox
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from time import sleep
import csv
#navigate to the ystory companies page
#start collecting data from ystory
START_URL = 'https://yourstory.com/companies/search?page=1&hitsPerPage=30'
#when the collection populates 30 elements then click on next page
class CompDeetz():
def __init__(self):
self.browser = Firefox()
self.browser.get(START_URL)
sleep(20)
self.browser.find_element_by_xpath('/html/body/div[12]/div/div/button').click()
sleep(5)
self.browser.find_element_by_xpath('/html/body/div[1]/div[4]').click()
self.database = []
def write_row(self,record):
with open('test.csv', 'a') as t:
writer = csv.writer(t)
writer.writerows(record)
def get_everything(self):
all_list = [ (a.text) for a in self.browser.find_elements_by_xpath('//tr[@class="hit"]')]
all_records = []
for company in all_list:
record = company.split('\n')
all_records.append(record)
self.write_row(all_records)
def next_page(self):
self.browser.find_element_by_xpath('//ul[@class="ais-Pagination-list"]/li[7]/a').click()
sleep(20)
def main():
t = CompDeetz()
t.get_everything()
for i in range(33):
t.next_page()
t.get_everything()
if __name__ == "__main__":
main()
Instead of having two different methods, get_everything and next_page, and calling them multiple times, you can have one method get_everything and call it once.
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException

def get_everything(self):
    all_records = []
    nextPage = True
    while nextPage:
        all_list = [a.text for a in self.browser.find_elements_by_xpath('//tr[@class="hit"]')]
        for company in all_list:
            record = company.split('\n')
            all_records.append(record)
        try:
            nextPagelink = WebDriverWait(self.browser, 10).until(
                EC.element_to_be_clickable((By.XPATH, "//a[@aria-label='Next page']")))
            self.browser.execute_script("arguments[0].scrollIntoView();", nextPagelink)
            self.browser.execute_script("arguments[0].click();", nextPagelink)
            sleep(5)  # wait for the next page to load
        # On the last page the next-page link never becomes clickable, so the wait times out
        except TimeoutException:
            nextPage = False
    self.write_row(all_records)
Note: take care of the pop-up that appears on the page. I hope you already have a mechanism to handle it.
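To address the other part of the question - updating the CSV with new records on a periodic basis - one possible approach (a sketch, not from the original code; adapt the file name and record format to yours) is to load the rows already saved, skip duplicates, and append only what is new. The script itself can then be re-run on a schedule, e.g. via cron or Windows Task Scheduler.
import csv
import os

def append_new_records(all_records, path='test.csv'):
    # Rows already saved by previous runs
    seen = set()
    if os.path.exists(path):
        with open(path, newline='') as f:
            seen = {tuple(row) for row in csv.reader(f)}
    # Append only the records that were not seen before
    with open(path, 'a', newline='') as f:
        writer = csv.writer(f)
        for record in all_records:
            if tuple(record) not in seen:
                writer.writerow(record)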
I am trying to iterate through a list that refreshes every 10 sec.
This is what I have tried:
driver.get("https://www.winmasters.ro/ro/live-betting/")
events = driver.find_elements_by_css_selector('.event-wrapper.v1.event-live.odds-hidden.event-sport-1')
for i in range(len(events)):
try:
event = events[i]
name = event.find_element_by_css_selector('.event-details-team-name.event-details-team-a')# the error occurs here
except: # NoSuchElementException or StaleElementReferenceException
time.sleep(3) # i have tried up to 20 sec
event = events[i]
name = event.find_element_by_css_selector('.event-details-team-name.event-details-team-a')
This did not work, so I tried another except block:
except: # second try that also did not work
element = WebDriverWait(driver, 20).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, '.event-details-team-name.event-details-team-a'))
)
name = event.find_element_by_css_selector('.event-details-team-name.event-details-team-a')
Now I am assigning something to name that I will never use, like:
try:
event = events[i]
name = event.find_element_by_css_selector('.event-details-team-name.event-details-team-a')
except:
name = "blablabla"
With this code, when the page refreshes I get about 7 or 8 of the "blablabla" values until it finds my selector on the webpage again.
You can get all the required data using JavaScript.
The code below will give you a list of event maps with all the details instantly, without NoSuchElementException or StaleElementReferenceException errors:
me_id : unique identifier
href : href with details which you can use to get details
team_a : name of the first team
team_a_score : score of the first team
team_b : name of the second team
team_b_score : score of the second team
event_status : status of the event
event_clock : time of the event
events = driver.execute_script('return [...document.querySelectorAll(\'[data-uat="live-betting-overview-leagues"] .events-for-league .event-live\')].map(e=>{return {me_id:e.getAttribute("me_id"), href:e.querySelector("a.event-details-live").href, team_a:e.querySelector(".event-details-team-a").textContent, team_a_score:e.querySelector(".event-details-score-1").textContent, team_b:e.querySelector(".event-details-team-b").textContent, team_b_score:e.querySelector(".event-details-score-2").textContent, event_status:e.querySelector(\'[data-uat="event-status"]\').textContent, event_clock:e.querySelector(\'[data-uat="event-clock"]\').textContent}})')
for event in events:
print(event.get('me_id'))
print(event.get('href')) #using href you can open event details using: driver.get(event.get('href'))
print(event.get('team_a'))
print(event.get('team_a_score'))
print(event.get('team_b'))
print(event.get('team_b_score'))
print(event.get('event_status'))
print(event.get('event_clock'))
One primary problem is that you are acquiring all of the elements up front and then iterating through that list. As the page itself updates frequently, the elements you've already acquired go "stale", meaning they are no longer associated with current DOM objects. When you try to use those stale elements, Selenium throws StaleElementReferenceException because it has no way of doing anything with those now out-of-date objects.
One way to overcome this is to only acquire and use an element right as you need it, rather than fetching them all up front. I personally feel the cleanest approach is to use the CSS :nth-child() approach:
from selenium import webdriver
def main():
base_css = '.event-wrapper.v1.event-live.odds-hidden.event-sport-1'
driver = webdriver.Chrome()
try:
driver.get("https://www.winmasters.ro/ro/live-betting/")
# Get a list of all elements
events = driver.find_elements_by_css_selector(base_css)
print("Found {} events".format(len(events)))
# Iterate through the list, keeping track of the index
# note that nth-child referencing begins at index 1, not 0
for index, _ in enumerate(events, 1):
name = driver.find_element_by_css_selector("{}:nth-child({}) {}".format(
base_css,
index,
'.event-details-team-name.event-details-team-a'
))
print(name.text)
finally:
driver.quit()
if __name__ == "__main__":
main()
If I run the above script, I get this output:
$ python script.py
Found 2 events
Hapoel Haifa
FC Ashdod
Now, as the underlying webpage really does update a lot, there is still a decent chance you can get a SERE error. To overcome that you can use a retry decorator (pip install retry to get the package) to handle the SERE and reacquire the element:
import retry
from selenium import webdriver
from selenium.common.exceptions import StaleElementReferenceException
@retry.retry(StaleElementReferenceException, tries=3)
def get_name(driver, selector):
elem = driver.find_element_by_css_selector(selector)
return elem.text
def main():
base_css = '.event-wrapper.v1.event-live.odds-hidden.event-sport-1'
driver = webdriver.Chrome()
try:
driver.get("https://www.winmasters.ro/ro/live-betting/")
events = driver.find_elements_by_css_selector(base_css)
print("Found {} events".format(len(events)))
for index, _ in enumerate(events, 1):
name = get_name(
driver,
"{}:nth-child({}) {}".format(
base_css,
index,
'.event-details-team-name.event-details-team-a'
)
)
print(name)
finally:
driver.quit()
if __name__ == "__main__":
main()
Now, despite the above examples, I think you still have issues with your CSS selectors, which is the primary reason for the NoSuchElement exceptions. I can't help with that without a better description of what you are actually trying to accomplish with this script.
I have a list, which is dynamically loaded by AJAX.
At first, while loading, it's code is like this:
<ul><li class="last"><a class="loading" href="#"><ins> </ins>ΠΠ°Π³ΡΡΠ·ΠΊΠ°...</a></li></ul>
When the list is loaded, all of its li and a elements are changed. And there is always more than one li.
Like this:
<ul class="ltr">
<li id="t_b_68" class="closed" rel="simple">
<a id="t_a_68" href="javascript:void(0)">Category 1</a>
</li>
<li id="t_b_64" class="closed" rel="simple">
<a id="t_a_64" href="javascript:void(0)">Category 2</a>
</li>
...
I need to check whether the list is loaded, so I check if it has several li elements.
So far I have tried:
1) Custom waiting condition
class more_than_one(object):
def __init__(self, selector):
self.selector = selector
def __call__(self, driver):
elements = driver.find_elements_by_css_selector(self.selector)
if len(elements) > 1:
return True
return False
...
try:
query = WebDriverWait(driver, 30).until(more_than_one('li'))
except:
print "Bad crap"
else:
# Then load ready list
2) Custom function based on find_elements_by
def wait_for_several_elements(driver, selector, min_amount, limit=60):
"""
This function provides awaiting of <min_amount> of elements found by <selector> with
time limit = <limit>
"""
step = 1 # in seconds
current_wait = 0
while current_wait < limit:
try:
print "Waiting... " + str(current_wait)
query = driver.find_elements_by_css_selector(selector)
if len(query) > min_amount:
print "Found!"
return True
else:
time.sleep(step)
current_wait += step
except:
time.sleep(step)
current_wait += step
return False
This doesn't work, because the driver (the current element passed to this function) gets lost in the DOM. The UL isn't changed, but Selenium can't find it anymore for some reason.
3) Explicit wait. This just sucks, because some lists load instantly and some take 10+ seconds to load. If I use this technique I have to wait the maximum time on every occurrence, which is very bad for my case.
4) Also, I can't wait for a child element with XPath correctly. This one just expects the ul to appear.
try:
print "Going to nested list..."
#time.sleep(WAIT_TIME)
query = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, './/ul')))
nested_list = child.find_element_by_css_selector('ul')
Please tell me the right way to be sure that several child elements are loaded for the specified element.
P.S. All these checks and searches should be relative to the current element.
First and foremost, the elements are AJAX elements.
Now, as per the requirement to locate all the desired elements and create a list, the simplest approach would be to induce WebDriverWait for visibility_of_all_elements_located(), and you can use either of the following locator strategies:
Using CSS_SELECTOR:
elements = WebDriverWait(driver, 20).until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, "ul.ltr li[id^='t_b_'] > a[id^='t_a_'][href]")))
Using XPATH:
elements = WebDriverWait(driver, 20).until(EC.visibility_of_all_elements_located((By.XPATH, "//ul[@class='ltr']//li[starts-with(@id, 't_b_')]/a[starts-with(@id, 't_a_') and starts-with(., 'Category')]")))
Note : You have to add the following imports :
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
In case your use case is to wait for a certain number of elements to be loaded, e.g. 10 elements, you can use a lambda function as follows:
Using >:
myLength = 9
WebDriverWait(driver, 20).until(lambda driver: len(driver.find_elements_by_xpath("//ul[@class='ltr']//li[starts-with(@id, 't_b_')]/a[starts-with(@id, 't_a_') and starts-with(., 'Category')]")) > int(myLength))
Using ==:
myLength = 10
WebDriverWait(driver, 20).until(lambda driver: len(driver.find_elements_by_xpath("//ul[@class='ltr']//li[starts-with(@id, 't_b_')]/a[starts-with(@id, 't_a_') and starts-with(., 'Category')]")) == int(myLength))
You can find a relevant discussion in How to wait for number of elements to be loaded using Selenium and Python
References
You can find a couple of relevant detailed discussions in:
Getting specific elements in selenium
Cannot find table element from div element in selenium python
Extract text from an aria-label selenium webdriver (python)
I created AllEc which basically piggybacks on WebDriverWait.until logic.
This will wait until the timeout occurs or when all of the elements have been found.
from typing import Callable
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import StaleElementReferenceException
class AllEc(object):
def __init__(self, *args: Callable, description: str = None):
self.ecs = args
self.description = description
def __call__(self, driver):
try:
for fn in self.ecs:
if not fn(driver):
return False
return True
except StaleElementReferenceException:
return False
# usage example:
wait = WebDriverWait(driver, timeout)
ec1 = EC.invisibility_of_element_located(locator1)
ec2 = EC.invisibility_of_element_located(locator2)
ec3 = EC.invisibility_of_element_located(locator3)
all_ec = AllEc(ec1, ec2, ec3, description="Required elements to show page has loaded.")
found_elements = wait.until(all_ec, "Could not find all expected elements")
Alternatively I created AnyEc to look for multiple elements but returns on the first one found.
class AnyEc(object):
"""
Use with WebDriverWait to combine expected_conditions in an OR.
Example usage:
>>> wait = WebDriverWait(driver, 30)
>>> either = AnyEc(expectedcondition1, expectedcondition2, expectedcondition3, etc...)
>>> found = wait.until(either, "Cannot find any of the expected conditions")
"""
def __init__(self, *args: Callable, description: str = None):
self.ecs = args
self.description = description
def __iter__(self):
return self.ecs.__iter__()
def __call__(self, driver):
for fn in self.ecs:
try:
rt = fn(driver)
if rt:
return rt
except TypeError as exc:
raise exc
except Exception as exc:
# print(exc)
pass
def __repr__(self):
return " ".join(f"{e!r}," for e in self.ecs)
def __str__(self):
return f"{self.description!s}"
either = AnyEc(ec1, ec2, ec3)
found_element = wait.until(either, "Could not find any of the expected elements")
Lastly, if it's possible to do so, you could try waiting for Ajax to be finished.
This is not useful in all cases -- e.g. when Ajax is always active. In the cases where Ajax runs and finishes it can work. There are also some Ajax libraries that do not set the active attribute, so double check that you can rely on this.
def is_ajax_complete(driver):
rt = driver.execute_script("return jQuery.active")
return rt == 0
wait.until(lambda driver: is_ajax_complete(driver), "Ajax did not finish")
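If jQuery is not available on the page, a rough alternative (my own variant, and it only covers the initial page load, not individual AJAX calls) is to poll document.readyState:
def is_page_loaded(driver):
    # readyState becomes "complete" once the initial document has finished loading
    return driver.execute_script("return document.readyState") == "complete"

wait.until(lambda driver: is_page_loaded(driver), "Page did not finish loading")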
(1) You did not mention the error you get with it
(2) Since you mention
...because driver (current element passed to this function)...
I'll assume this is actually a WebElement. In this case, instead of passing the object itself to your method, simply pass the selector that finds that WebElement (in your case, the ul). If the "driver gets lost in DOM", it could be that re-creating it inside the while current_wait < limit: loop could mitigate the problem
(3) yeap, time.sleep() will only get you that far
(4) Since the li elements loaded dynamically contain class=closed, instead of (By.XPATH, './/ul') you could try (By.CSS_SELECTOR, 'ul > li.closed'), as in the sketch below.
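Roughly, point (4) could look like this (a sketch based on the markup shown in the question; `child` is the question's current element):
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

# Wait until loaded <li class="closed"> children replace the "loading" stub,
# then search relative to the current element from the question.
WebDriverWait(driver, 30).until(
    EC.presence_of_all_elements_located((By.CSS_SELECTOR, 'ul > li.closed')))
nested_items = child.find_elements_by_css_selector('ul > li.closed')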
Keeping in mind the comments of Mr.E. and Arran, I based my list traversal entirely on CSS selectors. The tricky part was my own list structure and markers (changing classes, etc.), as well as creating the required selectors on the fly and keeping them in memory during traversal.
I did away with waiting for several elements by searching for anything that is not in the loading state. You may use the ":nth-child" selector as well, like here:
#in for loop with enumerate for i
selector.append(' > li:nth-child(%i)' % (i + 1)) # identify child <li> by its order pos
This is my heavily commented example solution:
def parse_crippled_shifted_list(driver, frame, selector, level=1, parent_id=0, path=None):
"""
Traversal of html list of special structure (you can't know if element has sub list unless you enter it).
Supports start from remembered list element.
Nested lists have classes "closed" and "last closed" when closed and "open" and "last open" when opened (on <li>).
Elements themselves have classes "leaf" and "last leaf" in both cases.
Nested lists situate in <li> element as <ul> list. Each <ul> appears after clicking <a> in each <li>.
If you click <a> of leaf, page in another frame will load.
driver - WebDriver; frame - frame of the list; selector - selector to current list (<ul>);
level - level of depth, just for console output formatting, parent_id - id of parent category (in DB),
path - remained path in categories (ORM objects) to target category to start with.
"""
# Add current level list elements
# This method selects all but loading. Just what is needed to exclude.
selector.append(' > li > a:not([class=loading])')
# Wait for child list to load
try:
query = WebDriverWait(driver, WAIT_LONG_TIME).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ''.join(selector))))
except TimeoutException:
print "%s timed out" % ''.join(selector)
else:
# List is loaded
del selector[-1] # selector correction: delete last part aimed to get loaded content
selector.append(' > li')
children = driver.find_elements_by_css_selector(''.join(selector)) # fetch list elements
# Walk the whole list
for i, child in enumerate(children):
del selector[-1] # delete non-unique li tag selector
if selector[-1] != ' > ul' and selector[-1] != 'ul.ltr':
del selector[-1]
selector.append(' > li:nth-child(%i)' % (i + 1)) # identify child <li> by its order pos
selector.append(' > a') # add 'li > a' reference to click
child_link = driver.find_element_by_css_selector(''.join(selector))
# If we parse freely further (no need to start from remembered position)
if not path:
# Open child
try:
double_click(driver, child_link)
except InvalidElementStateException as e:
print "\n\nERROR\n", e, '\n\n'
else:
# Determine its type
del selector[-1] # delete changed and already useless link reference
# If <li> is category, it would have <ul> as child now and class="open"
# Check by class is priority, because <li> exists for sure.
current_li = driver.find_element_by_css_selector(''.join(selector))
# Category case - BRANCH
if current_li.get_attribute('class') == 'open' or current_li.get_attribute('class') == 'last open':
new_parent_id = process_category_case(child_link, parent_id, level) # add category to DB
selector.append(' > ul') # forward to nested list
# Wait for nested list to load
try:
query = WebDriverWait(driver, WAIT_LONG_TIME).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ''.join(selector))))
except TimeoutException:
print "\t" * level, "%s timed out (%i secs). Failed to load nested list." %\
''.join(selector), WAIT_LONG_TIME
# Parse nested list
else:
parse_crippled_shifted_list(driver, frame, selector, level + 1, new_parent_id)
# Page case - LEAF
elif current_li.get_attribute('class') == 'leaf' or current_li.get_attribute('class') == 'last leaf':
process_page_case(driver, child_link, level)
else:
raise Exception('Damn! Alien class: %s' % current_li.get_attribute('class'))
# If it's required to continue from specified category
else:
# Check if it's required category
if child_link.text == path[0].name:
# Open required category
try:
double_click(driver, child_link)
except InvalidElementStateException as e:
print "\n\nERROR\n", e, '\n\n'
else:
# This element of list must be always category (have nested list)
del selector[-1] # delete changed and already useless link reference
# If <li> is category, it would have <ul> as child now and class="open"
# Check by class is priority, because <li> exists for sure.
current_li = driver.find_element_by_css_selector(''.join(selector))
# Category case - BRANCH
if current_li.get_attribute('class') == 'open' or current_li.get_attribute('class') == 'last open':
selector.append(' > ul') # forward to nested list
# Wait for nested list to load
try:
query = WebDriverWait(driver, WAIT_LONG_TIME).until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, ''.join(selector))))
except TimeoutException:
print "\t" * level, "%s timed out (%i secs). Failed to load nested list." %\
''.join(selector), WAIT_LONG_TIME
# Process this nested list
else:
last = path.pop(0)
if len(path) > 0: # If more to parse
print "\t" * level, "Going deeper to: %s" % ''.join(selector)
parse_crippled_shifted_list(driver, frame, selector, level + 1,
parent_id=last.id, path=path)
else: # Current is required
print "\t" * level, "Returning target category: ", ''.join(selector)
path = None
parse_crippled_shifted_list(driver, frame, selector, level + 1, last.id, path=None)
# Page case - LEAF
elif current_li.get_attribute('class') == 'leaf':
pass
else:
print "dummy"
del selector[-2:]
This is how I solved the problem of waiting until a certain number of posts had completely loaded through AJAX:
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
# create a new Chrome session
driver = webdriver.Chrome()
# navigate to your web app.
driver.get("http://my.local.web")
# get the 'see more' button
seemore_button = driver.find_element_by_id("seemoreID")
# click the button to load more posts
seemore_button.click()
# Wait up to 30 sec until the AJAX search loads the content
WebDriverWait(driver, 30).until(EC.visibility_of_all_elements_located((By.CLASS_NAME, "post")))
# Get the list of post
listpost = driver.find_elements_by_class_name("post")