Please help. I'm locating elements, printing them, and saving each element.text to a CSV file, but something isn't right. If I just print the elements without clicking through to a new page, every element at the specified positions of the XPath is printed; see the code below.
while True:
    with open('C:/Python34/email.csv', 'a') as f:
        z = csv.writer(f, delimiter='\t', lineterminator='\n')
        row = []
        for sub_list in driver.find_elements_by_xpath("//*[@id='wrapper']/div[2]/div[2]/div/div[2]/div[1]/div[3]/div[1]/div[2]/div/div[2]/div/div[2]/div/div[position() = 1 or position() = 2 or position() = 3]"):
            print(sub_list.text, end=" ")
However, if I include clicks in the loop (see the code below), it only prints the element at one position per iteration. E.g. in the first iteration it only prints the element in div[1], in the next iteration only the element in div[2], and so forth. Please enlighten me. With thanks.
for sub_list in driver.find_elements_by_xpath("//*[@id='wrapper']/div[2]/div[2]/div/div[2]/div[1]/div[3]/div[1]/div[2]/div/div[2]/div/div[2]/div/div[position() = 1 or position() = 2 or position() = 3]"):
    print(sub_list.text, end=" ")
    z.writerow(sub_list.text)
WebDriverWait(driver, 50).until(EC.visibility_of_element_located((By.XPATH, '//*[@id="detail-pagination-next-btn"]/span')))
WebDriverWait(driver, 50).until(EC.visibility_of_element_located((By.XPATH, '//*[@id="detail-pagination-next-btn"]')))
WebDriverWait(driver, 50).until(EC.visibility_of_element_located((By.ID, 'detail-pagination-next-btn')))
WebDriverWait(driver, 50).until(EC.element_to_be_clickable((By.ID, 'detail-pagination-next-btn')))
WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.CLASS_NAME, "detail-company-selection")))
time.sleep(5)
c = driver.find_element_by_id('detail-pagination-next-btn')
c.click()
time.sleep(5)
I'm trying to click on an element, located by an XPath that contains text from a list.
categories = WebDriverWait(driver, 20).until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, 'div.sph-MarketGroupNavBarButton ')))
categorylist = []
for category in categories:
    text = category.text
    categorylist.append(text)
    print(text)
categoryindex = categories[3]
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, f"//div[text() ='{categoryindex}']"))).click()
print(categoryindex)
When I run it, the script creates the list and prints out the list content. Then I want to search for the element that contains the text from the list and click on it, but this part of the code doesn't work: it doesn't click or print the specified text. Nothing happens and I get a timeout error.
categoryindex = categories[3]
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, f"//div[text() ='{categoryindex}']"))).click()
print(categoryindex)
When I replace {categoryindex} with the actual text from the list, it clicks on it.
Change
categoryindex = categories[3]
to
categoryindex = categorylist[3]
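The reason this works: categories[3] is a WebElement, and interpolating it into the f-string produces its object representation rather than the button's caption, so the XPath never matches anything. categorylist[3] holds the text collected earlier. As a sketch, the corrected tail of the script:
categoryindex = categorylist[3]  # the collected text, not the WebElement
WebDriverWait(driver, 20).until(
    EC.element_to_be_clickable((By.XPATH, f"//div[text() ='{categoryindex}']"))
).click()
print(categoryindex)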
I want to get the locator of this element (5,126,601) but I can't seem to get it normally.
I think it will be necessary to hover the mouse over the element and try to get the XPath, but I can't hover over it because it is an SVG element. Does anyone know a way to get the locator properly?
Here is the link to the website: https://fundingsocieties.com/progress
Well, this element is updated only by hovering over the chart.
This is the unique XPath locator for this element:
"//*[name()='text']//*[name()='tspan' and(contains(#style,'bold'))]"
The entire Selenium command can be:
total_text = driver.find_element(By.XPATH, "//*[name()='text']//*[name()='tspan' and contains(@style,'bold')]").text
This can also be done with this CSS Selector: text tspan[style*='bold'], so the Selenium command could be
total_text = driver.find_element(By.CSS_SELECTOR, "text tspan[style*='bold']").text
Well, CSS Selector looks much shorter :)
Clicking on each node in turn will lead to the accompanying text being placed in the highcharts-label element. This text can then be retrieved, and the Quarter (1st tspan) can be linked to the Total value (4th tspan) that you desire.
url="https://fundingsocieties.com/progress"
driver.get(url)
chart = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, "//div[#data-highcharts-chart='0']"))
)
markers = chart.find_elements(By.XPATH, "//*[local-name()='g'][contains(#class,'highcharts-markers')]/*[local-name()='path']")
for m in markers:
m.click()
try:
element = WebDriverWait(driver, 2).until(
EC.presence_of_element_located((By.XPATH, "//*[local-name()='g'][contains(#class,'highcharts-label')]/*[local-name()='text']"))
)
tspans = element.find_elements(By.XPATH, "./*[local-name()='tspan']")
if len(tspans) > 3:
print ("%s = %s" % (tspans[0].text, tspans[3].text))
except TimeoutException:
pass
The output is as follows:
Q2-2015 = 2
Q3-2015 = 12
....
Q1-2022 = 5,076,978
Q2-2022 = 5,109,680
Q3-2022 = 5,122,480
Q4-2022 = 5,126,601
I have a question. I find an element on the page by its class, display the text from it, and then take the text apart with split(), but when the element is not present there is an error and nothing is parsed.
Code:
spans = driver.find_elements(By.XPATH, "//span[@class='ipsContained ipsType_break']")
for span in spans:
    atag = span.find_element(By.XPATH, ".//a")
    print(atag.get_attribute('href'))
    urlik = atag.get_attribute('href')
    driver.get(url=urlik)
    time.sleep(2)
    urla = driver.find_element(By.CLASS_NAME, "ipsPagination_pageJump").text
    for page_number in range(int(urla.split()[3])):
        page_number = page_number + 1
        driver.get(url=urlik + f"page/{page_number}")
        time.sleep(2)
        imgs = driver.find_elements(By.CLASS_NAME, "cGalleryPatchwork_image")
        for i in imgs:
            driver.execute_script("arguments[0].scrollIntoView(true);", i)
            time.sleep(0.2)
            print(i.get_attribute("src"))
I need to check this line:
urla = driver.find_element(By.CLASS_NAME, "ipsPagination_pageJump").text
To attempt to find an element on the page by class name and display its text irrespective of whether the element is present, you can wrap the code in a try-except{} block handling the NoSuchElementException as follows:
driver.get(url=urlik)
time.sleep(2)
try:
    urla = driver.find_element(By.CLASS_NAME, "ipsPagination_pageJump").text
    for page_number in range(int(urla.split()[3])):
        page_number = page_number + 1
        driver.get(url=urlik + f"page/{page_number}")
        time.sleep(2)
        imgs = driver.find_elements(By.CLASS_NAME, "cGalleryPatchwork_image")
        for i in imgs:
            driver.execute_script("arguments[0].scrollIntoView(true);", i)
            time.sleep(0.2)
            print(i.get_attribute("src"))
except NoSuchElementException:
    print("Element is not present")
Instead of
urla = driver.find_element(By.CLASS_NAME, "ipsPagination_pageJump")
Use
urla = driver.find_elements(By.CLASS_NAME, "ipsPagination_pageJump")
if urla:
    urla[0].text
The find_elements method returns a list of web elements matching the passed locator.
So, in case such element(s) exist, urla will be a non-empty list, and a non-empty list is interpreted in Python as Boolean True.
In case no matching elements are found, urla will be an empty list, and an empty list is interpreted in Python as Boolean False.
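If this check is needed in several places, the pattern can be wrapped in a small helper. This is just a sketch; text_or_default is a hypothetical name, not part of the Selenium API:
def text_or_default(driver, by, locator, default=""):
    """Return the text of the first matching element, or the default
    when nothing matches (find_elements returns an empty list)."""
    elements = driver.find_elements(by, locator)
    return elements[0].text if elements else default

urla = text_or_default(driver, By.CLASS_NAME, "ipsPagination_pageJump")
if urla:
    print(urla.split()[3])  # safe here: urla is a plain string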
I am attempting to scrape data across multiple pages (36) of a website to gather the document number and the revision number for each available document and save them to two different lists. If I run the code block below for each individual page, it works perfectly. However, when I added the while loop to iterate through all 36 pages, it loops, but only the data from the first page is saved.
#sam.gov website
url = 'https://sam.gov/search/?index=sca&page=1&sort=-modifiedDate&pageSize=25&sfm%5Bstatus%5D%5Bis_active%5D=true&sfm%5BwdPreviouslyPerformedWrapper%5D%5BpreviouslyPeformed%5D=prevPerfNo%2F'
#webdriver
driver = webdriver.Chrome(options = options_, executable_path = r'C:/Users/439528/Python Scripts/Spyder/chromedriver.exe' )
driver.get(url)
#get rid of pop up window
WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#sds-dialog-0 > button > usa-icon > i-bs > svg'))).click()
#list of revision numbers
revision_num = []
#empty list for all the WD links
WD_num = []
substring = '2015'
current_page = 0
while True:
    current_page += 1
    if current_page == 36:
        # Find all elements on the page named "field name". For each one, get the text. If the text is 'Revision Number',
        # get the 'sibling' element, which is the actual revision number, and append its text to the revision_num list.
        elements = driver.find_elements_by_class_name('sds-field__name')
        wd_links = driver.find_elements_by_class_name('usa-link')
        for i in elements:
            element = i.text
            if element == 'Revision Number':
                revision_numbers = i.find_elements_by_xpath("./following-sibling::div")
                for x in revision_numbers:
                    a = x.text
                    revision_num.append(a)
        # Find all links that contain the partial text 2015 and put the wd text into the WD_num list.
        for link in wd_links:
            wd = link.text
            if substring in wd:
                WD_num.append(wd)
        print('Last Page Complete!')
        break
    else:
        # Same extraction as above for the intermediate pages.
        elements = driver.find_elements_by_class_name('sds-field__name')
        wd_links = driver.find_elements_by_class_name('usa-link')
        for i in elements:
            element = i.text
            if element == 'Revision Number':
                revision_numbers = i.find_elements_by_xpath("./following-sibling::div")
                for x in revision_numbers:
                    a = x.text
                    revision_num.append(a)
        for link in wd_links:
            wd = link.text
            if substring in wd:
                WD_num.append(wd)
        # Click on next page.
        click_icon = WebDriverWait(driver, 5, 0.25).until(EC.visibility_of_element_located((By.ID, 'bottomPagination-nextPage')))
        click_icon.click()
        WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, 'main-container')))
Things I've tried:
I added the WebDriverWait in order to slow the script down so the page can load and/or the elements become clickable/locatable.
I declared the empty lists outside the loop so they are not overwritten on each iteration.
I have edited the while loop multiple times, either counting up to 36 (while current_page < 37) or moving the counter to the top or bottom of the loop.
Any ideas? TIA.
EDIT: added screenshot of 'field name'
I have refactored your code and made things very simple.
driver = webdriver.Chrome(options=options_, executable_path=r'C:/Users/439528/Python Scripts/Spyder/chromedriver.exe')
revision_num = []
WD_num = []
for page in range(1, 37):
    url = 'https://sam.gov/search/?index=sca&page={}&sort=-modifiedDate&pageSize=25&sfm%5Bstatus%5D%5Bis_active%5D=true&sfm%5BwdPreviouslyPerformedWrapper%5D%5BpreviouslyPeformed%5D=prevPerfNo%2F'.format(page)
    driver.get(url)
    if page == 1:
        # The pop-up window only appears on the first page.
        WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#sds-dialog-0 > button > usa-icon > i-bs > svg'))).click()
    wd_links = WebDriverWait(driver, 10).until(EC.visibility_of_all_elements_located((By.XPATH, "//a[contains(@class,'usa-link') and contains(.,'2015')]")))
    revision_numbers = WebDriverWait(driver, 10).until(EC.visibility_of_all_elements_located((By.XPATH, "//div[@class='sds-field__name' and text()='Revision Number']/following-sibling::div")))
    for revision_number in revision_numbers:
        revision_num.append(revision_number.text)
    for wd_link in wd_links:
        WD_num.append(wd_link.text)
print(revision_num)
print(WD_num)
If you know there are only 36 pages to iterate, you can pass the page value in the URL.
Wait for the elements to be visible using WebDriverWait.
Construct your XPath in such a way that it identifies the elements uniquely, without the if/else branching.
console output on my terminal:
I only want to scrape the required information contained in the black box, and delete/remove/exclude the information contained in the red box.
I am doing this because the class names "entry" and "partial entry" exist in both boxes. Only the first "partial entry" contains the information that I need, so I plan to delete/remove/exclude the class name "mgrRspnInLine".
My code is:
while True:
    container = driver.find_elements_by_xpath('.//*[contains(@class,"review-container")]')
    for item in container:
        try:
            element = item.find_element_by_class_name('mgrRspnInline')
            driver.execute_script("""var element = document.getElementsByClassName("mgrRspnInline")[0]; element.parentNode.removeChild(element);""", element)
            WebDriverWait(driver, 50).until(EC.presence_of_element_located((By.XPATH, './/*[contains(@class,"taLnk ulBlueLinks")]')))
            element = WebDriverWait(driver, 50).until(EC.element_to_be_clickable((By.XPATH, './/*[contains(@class,"taLnk ulBlueLinks")]')))
            element.click()
            time.sleep(2)
            rating = item.find_elements_by_xpath('.//*[contains(@class,"ui_bubble_rating bubble_")]')
            for rate in rating:
                rate = rate.get_attribute("class")
                rate = str(rate)
                rate = rate[-2:]
                score_list.append(rate)
            time.sleep(2)
            stay = item.find_elements_by_xpath('.//*[contains(@class,"recommend-titleInline noRatings")]')
            for stayed in stay:
                stayed = stayed.text
                stayed = stayed.split(', ')
                stayed.append(stayed[0])
                travel_type.append(stayed[1])
            WebDriverWait(driver, 50).until(EC.presence_of_element_located((By.XPATH, './/*[contains(@class,"noQuotes")]')))
            summary = item.find_elements_by_xpath('.//*[contains(@class,"noQuotes")]')
            for comment in summary:
                comment = comment.text
                comments.append(comment)
            WebDriverWait(driver, 50).until(EC.presence_of_element_located((By.XPATH, './/*[contains(@class,"ratingDate")]')))
            rating_date = item.find_elements_by_xpath('.//*[contains(@class,"ratingDate")]')
            for date in rating_date:
                date = date.get_attribute("title")
                date = str(date)
                review_date.append(date)
            WebDriverWait(driver, 50).until(EC.presence_of_element_located((By.XPATH, './/*[contains(@class,"partial_entry")]')))
            review = item.find_elements_by_xpath('.//*[contains(@class,"partial_entry")]')
            for comment in review:
                comment = comment.text
                print(comment)
                reviews.append(comment)
        except NoSuchElementException:
            continue
    try:
        element = WebDriverWait(driver, 100).until(EC.element_to_be_clickable((By.XPATH, './/*[contains(@class,"nav next taLnk ui_button primary")]')))
        element.click()
        time.sleep(2)
    except (ElementClickInterceptedException, NoSuchElementException) as e:
        print(e)
        break
Basically, within the "review-container" I searched first for the class name "mgrRspnInLine", then tried to delete it using execute_script.
Unfortunately, the output still shows the contents contained in "mgrRspnInLine".
If you want to avoid matching the second element with your XPath, you can just modify the XPath as below:
.//*[contains(@class,"partial_entry") and not(ancestor::*[@class="mgrRspnInLine"])]
This will match an element with class name "partial_entry" only if it doesn't have an ancestor with class name "mgrRspnInLine".
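As a usage sketch, assuming the same item loop variable as in the question, the filtered XPath simply replaces the original partial_entry lookup:
# Matches only partial_entry elements that are not inside a mgrRspnInLine response.
review = item.find_elements_by_xpath(
    './/*[contains(@class,"partial_entry") and not(ancestor::*[@class="mgrRspnInLine"])]'
)
for comment in review:
    reviews.append(comment.text)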
If you want the first occurrence you could use a CSS class selector instead:
.partial_entry
and retrieve it with find_element_by_css_selector:
find_element_by_css_selector(".partial_entry")
You can delete all the .mgrRspnInLine elements with:
driver.execute_script("[...document.querySelectorAll('.mgrRspnInLine')].map(el => el.parentNode.removeChild(el))")
Stitching together the comment by Andersson and the two answers provided by QHarr and pguardiario, I finally solved the problem.
The key is to target a container within the container: all the information is contained in the class name "ui_column is-9", which is itself contained in the class name "review-container". This addresses Andersson's comment about multiple mgrRspnInLine elements.
Within the nested loop, I used pguardiario's suggestion to delete the existing multiple mgrRspnInLine elements, then added QHarr's answer on .partial_entry.
while True:
    container = driver.find_elements_by_xpath('.//*[contains(@class,"review-container")]')
    for items in container:
        element = WebDriverWait(driver, 1000).until(EC.element_to_be_clickable((By.XPATH, './/*[contains(@class,"taLnk ulBlueLinks")]')))
        element.click()
        time.sleep(10)
        contained = items.find_elements_by_xpath('.//*[contains(@class,"ui_column is-9")]')
        for item in contained:
            try:
                # Delete every management-response block before scraping.
                driver.execute_script("[...document.querySelectorAll('.mgrRspnInLine')].map(el => el.parentNode.removeChild(el))")
                rating = item.find_element_by_xpath('.//*[contains(@class,"ui_bubble_rating bubble_")]')
                rate = rating.get_attribute("class")
                rate = str(rate)
                rate = rate[-2:]
                score_list.append(rate)
                time.sleep(2)
                stay = item.find_element_by_xpath('.//*[contains(@class,"recommend-titleInline")]')
                stayed = stay.text
                stayed = stayed.split(', ')
                stayed.append(stayed[0])
                travel_type.append(stayed[1])
                WebDriverWait(driver, 50).until(EC.presence_of_element_located((By.XPATH, './/*[contains(@class,"noQuotes")]')))
                summary = item.find_element_by_xpath('.//*[contains(@class,"noQuotes")]')
                comment = summary.text
                comments.append(comment)
                WebDriverWait(driver, 50).until(EC.presence_of_element_located((By.XPATH, './/*[contains(@class,"ratingDate")]')))
                rating_date = item.find_element_by_xpath('.//*[contains(@class,"ratingDate")]')
                date = rating_date.get_attribute("title")
                date = str(date)
                review_date.append(date)
                WebDriverWait(driver, 50).until(EC.presence_of_element_located((By.XPATH, './/*[contains(@class,"partial_entry")]')))
                review = item.find_element_by_css_selector(".partial_entry")
                comment = review.text
                print(comment)
            except NoSuchElementException:
                continue
    try:
        element = WebDriverWait(driver, 100).until(EC.element_to_be_clickable((By.XPATH, './/*[contains(@class,"nav next taLnk ui_button primary")]')))
        element.click()
        time.sleep(2)
    except (ElementClickInterceptedException, NoSuchElementException) as e:
        print(e)
        break