I am trying to scrape this webpage with the following script, but the wait for this element never succeeds and it does not scrape correctly.
clickMe = wait(driver, 3).until(EC.element_to_be_clickable((By.CSS_SELECTOR, ("//a[@class='style-scope match-pop-market']"))))
The element is correct in Chrome's inspector.
//a[@class='style-scope match-pop-market']
How can I get the current page's elem_href and not the invisible elements it seems to be picking up from other pages?
//div[@class='mpm_match_title' and .//div[@class='mpm_match_title style-scope match-pop-market']]//a[@class='style-scope match-pop-market']
This should fix the issue in theory, but it does not work. Any ideas? Current output:
None
None
None
None
None
None
None
None
None
None
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6381070
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6386987
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6386988
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6386989
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6386990
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6386991
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6386992
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6387025
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6387026
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6387027
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6387028
I am unable to wait for the element, as the wait latches onto the invisible elements that are not on the current page.
So:
//div[contains(@class, 'mpm_match_title')] #TEXT
//div[contains(@class, 'mpm_match_title style-scope match-pop-market')] #BAR
//a[contains(@class, 'style-scope match-pop-market')] #HREF
style-scope match-pop-market
Combined:
//div[contains(@class, 'mpm_match_title') and .//div[contains(@class, 'mpm_match_title style-scope match-pop-market')]]//a[@class='style-scope match-pop-market']
This is unable to find anything either.
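If the XPath keeps matching hidden copies from other tournament panes, another option is to filter in Python with Selenium's is_displayed() instead of in the XPath; a minimal sketch, assuming the same anchor locator:

# keep only anchors that are actually rendered on the current page
elems = driver.find_elements_by_xpath("//a[@class='style-scope match-pop-market']")
for href in [e.get_attribute('href') for e in elems if e.is_displayed()]:
    print(href)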
Desired output:
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6381070
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6386987
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6386988
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6386989
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6386990
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6386991
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6386992
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6387025
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6387026
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6387027
https://www.palmerbet.com/sports/soccer/italy-serie-b/match/6387028
Using the code from the pastebin link in the comments, I basically just modified the XPath to search for specific elements that identify the links on the current page.
from random import shuffle
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait as wait
driver = webdriver.Chrome()
driver.set_window_size(1024, 600)
driver.maximize_window()
driver.get('https://www.palmerbet.com/sports/soccer')
clickMe = wait(driver, 3).until(EC.element_to_be_clickable((By.XPATH,
    '//*[contains(@class,"filter_labe")]')))
options = driver.find_elements_by_xpath('//*[contains(@class,"filter_labe")]')
indexes = [index for index in range(len(options))]
shuffle(indexes)

xp = '//sport-match-grp[not(contains(@style, "display: none;"))]' \
     '//match-pop-market[@class="sport-match-grp" and ' \
     'not(contains(@style, "display: none;")) and ' \
     './/a[@id="match_link" and boolean(@href)]]'

for index in indexes:
    print(f'Loading index {index}')
    driver.get('https://www.palmerbet.com/sports/soccer')
    clickMe1 = wait(driver, 10).until(EC.element_to_be_clickable((By.XPATH,
        '(//ul[@id="tournaments"]//li//input)[%s]' % str(index + 1))))
    driver.execute_script("arguments[0].scrollIntoView();", clickMe1)
    clickMe1.click()
    try:
        # this attempts to find any links on the page
        clickMe = wait(driver, 3).until(EC.element_to_be_clickable((
            By.XPATH, xp)))
        elems = driver.find_elements_by_xpath(xp)
        elem_href = []
        for elem in elems:
            print(elem.find_element_by_xpath('.//a[@id="match_link"]')
                  .get_attribute('href'))
            elem_href.append(elem.get_attribute("href"))
    except:
        print(f'There are no matches in index {index}.')
Related
I have the following code that scrapes some information I need from a website. However, there are 61 pages I need to go through and scrape the same data that requires me to click on the 'Next' button to go to the next page with the url remaining the same.
I know it is possible to use driver.find_element_by_link_text('Next').click() to go to the next page but I am not sure how to include this in my code.
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
import time
driver = webdriver.Chrome()
driver.get('https://mspotrace.org.my/Sccs_list')
time.sleep(20)
# Get list of elements
elements = WebDriverWait(driver, 20).until(EC.presence_of_all_elements_located((By.XPATH, "//a[@title='View on Map']")))
# Loop through element popups and pull details of facilities into DF
pos = 0
df = pd.DataFrame(columns=['facility_name','other_details'])
for element in elements:
    try:
        data = []
        element.click()
        time.sleep(10)
        facility_name = driver.find_element_by_xpath('//h4[@class="modal-title"]').text
        other_details = driver.find_element_by_xpath('//div[@class="modal-body"]').text
        time.sleep(5)
        data.append(facility_name)
        data.append(other_details)
        df.loc[pos] = data
        WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "button[aria-label='Close'] > span"))).click()  # close popup window
        print("Scraping info for", facility_name, "")
        time.sleep(15)
        pos += 1
    except Exception:
        alert = driver.switch_to.alert
        print("No geo location information")
        alert.accept()
        pass
print(df)
Answering your question, "I don't know how I would put it in my code":
The counter iii is used to repeat your existing code 60 times.
I cannot test the entire code, but I tested the loops.
For the sake of simplicity, in the code below I removed the element scraping so I could focus the test on repeating the clicks on the Next button, which is your question.
If you are going to test on your side, ensure you replace
print('your stuff would stay here!')
with the actual element scraping block that you have in your original code.
Hope it helps!
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
import time
driver = webdriver.Chrome()
driver.get('https://mspotrace.org.my/Sccs_list')
time.sleep(20)
# Get list of elements
elements = WebDriverWait(driver, 20).until(EC.presence_of_all_elements_located((By.XPATH, "//a[@title='View on Map']")))
# Loop through element popups and pull details of facilities into DF
pos = 0
df = pd.DataFrame(columns=['facility_name','other_details'])
for iii in range(60):  # repeat 60 times (61 pages -> 60 'Next' clicks)
    for element in elements:
        print('your stuff would stay here!')

    # click next
    btnNext = driver.find_element(By.XPATH, '//*[@id="dTable_next"]/a')
    driver.execute_script("arguments[0].scrollIntoView();", btnNext)
    driver.execute_script("arguments[0].click();", btnNext)
    time.sleep(5)

    # print current df. You may want to store it and print in the end only?
    print(df)

    # Get list of elements again
    elements = WebDriverWait(driver, 20).until(EC.presence_of_all_elements_located((By.XPATH, "//a[@title='View on Map']")))

    # Resetting vars again
    pos = 0
    df = pd.DataFrame(columns=['facility_name', 'other_details'])
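If you would rather not hard-code the page count, a variant is to stop when the Next control is disabled. A minimal sketch, assuming the site follows the usual DataTables convention of adding a "disabled" class to the #dTable_next item (not verified against this page):

while True:
    # ... scrape the current page here ...
    next_li = driver.find_element(By.XPATH, '//*[@id="dTable_next"]')
    if 'disabled' in (next_li.get_attribute('class') or ''):
        break  # no more pages
    driver.execute_script("arguments[0].click();", next_li.find_element(By.XPATH, './a'))
    time.sleep(5)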
I want to click on a link, get back to the main screen, then click on the second link, get back to the main page, and so on.
When it goes to the first link, it should wait until I extract the title, phone number, location and name of the phone.
My code is below:
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import time
from selenium.webdriver.common.action_chains import ActionChains
driver = webdriver.Chrome(executable_path=r"C:\Users\Faheem\Downloads\New folder (22)\chromedriver.exe")
driver.get(r"https://www.olx.com.pk/mobile-phones_c1453")
driver.find_element(By.XPATH, "//span[contains(text(),'Login')]").click()
time.sleep(2)
driver.find_element(By.XPATH, '//div[@class="_1075545d _21b291bd _42f36e3b d059c029 _858a64cf"]/button[4]').click()
time.sleep(3)
driver.find_element(By.ID, "phone").send_keys("3119407012")
time.sleep(2)
driver.find_element(By.XPATH, '//form[@class="a755fcd9"]/button/span').click()
time.sleep(2)
driver.find_element(By.ID, "password").send_keys("musliminstitute1")
time.sleep(2)
driver.find_element(By.XPATH, '//form[@class="a755fcd9"]/button').click()
time.sleep(2)
a = [elem.get_attribute('href') for elem in driver.find_elements(By.XPATH, '//div[@class="_1075545d _96d4439a d059c029 _858a64cf"]/ul/li/article/div[2]/a')]
b = driver.find_elements(By.XPATH, '//div[@class="_1075545d _96d4439a d059c029 _858a64cf"]/ul/li/article/div[2]/a')
print(type(b))
for i in range(0, len(b)):
    b = driver.find_elements(By.XPATH, '//div[@class="_1075545d _96d4439a d059c029 _858a64cf"]/ul/li/article/div[2]/a')
    b[i].click()
    time.sleep(3)
    driver.back()
I am getting the following error:
selenium.common.exceptions.ElementNotInteractableException: Message: element not interactable: element has zero size
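A common cause of this error is that the locator also matches collapsed, zero-size duplicates of the visible cards. A minimal sketch of the usual workarounds, assuming the same locator: skip elements that are not displayed, or fall back to a JavaScript click, which bypasses the size check:

for i in range(len(b)):
    # re-find after each driver.back(), since the old references go stale
    cards = driver.find_elements(By.XPATH, '//div[@class="_1075545d _96d4439a d059c029 _858a64cf"]/ul/li/article/div[2]/a')
    if cards[i].is_displayed():
        cards[i].click()
    else:
        driver.execute_script("arguments[0].click();", cards[i])
    time.sleep(3)
    driver.back()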
I'm quite new to Python and have written a script using Selenium to scrape a website. I've tried everything but can't get the loop to cycle through pages: it currently just scrapes the first page 5 times. I want to scrape all the pages for 'BR1'; any help would be great.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
with open('rightmove.csv', 'w') as file:
    file.write('PropertyCardcontent \n')

PATH = ("/usr/local/bin/chromedriver")
driver = webdriver.Chrome(PATH)
driver.get("https://www.rightmove.co.uk/house-prices.html")
print(driver.title)

elem = driver.find_element(By.NAME, 'searchLocation')  # Find the search box
elem.send_keys('BR1' + Keys.RETURN)

try:
    content = WebDriverWait(driver, 15).until(
        EC.presence_of_element_located((By.ID, 'content'))
    )
finally:
    time.sleep(3)

for p in range(5):
    sold = content.find_elements(By.CLASS_NAME, 'sold-prices-content-wrapper ')
    for solds in sold:
        address = solds.find_elements(By.CLASS_NAME, 'sold-prices-content ')
        for addresses in address:
            result = addresses.find_elements(By.CLASS_NAME, 'results ')
            for results in result:
                card = results.find_elements(By.CLASS_NAME, 'propertyCard')
                for propertyCard in card:
                    header = propertyCard.find_elements(By.CLASS_NAME, 'propertyCard-content')
                    for propertyCardcontent in header:
                        road = propertyCardcontent.find_elements(By.CLASS_NAME, 'title')
                    for propertyCardcontent in header:
                        road = propertyCardcontent.find_elements(By.CLASS_NAME, 'subTitle')
                        for subtitle in road:
                            bed = subtitle.find_elements(By.CLASS_NAME, 'propertyType')
    with open('rightmove.csv', 'a') as file:
        for i in range(len(result)):
            file.write(header[i].text + '\n')
    button = driver.find_element(By.XPATH, '//*[@id="content"]/div[2]/div[2]/div[4]/div[27]/div[3]/div')
    button.click()
    time.sleep(3)

driver.quit()
Since the page number is part of the URL, I recommend you use "https://www.rightmove.co.uk/house-prices/br1.html?page=1" as the base URL and loop through the pages, substituting the page number at the end of the URL with a format string.
One other thing: you don't need all those nested for loops; you can simply assign each variable its specific value, since everything you need is inside one HTML block that is easy to navigate.
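For example, a minimal sketch of that loop (pages is assumed to hold the page count read from the pagination label):

base_url = "https://www.rightmove.co.uk/house-prices/br1.html?page={}"
for page in range(1, pages + 1):
    driver.get(base_url.format(page))
    # ... scrape this page ...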
Update:
I'm sorry for being late, had unexpected stuff (...).
I've made some changes as I use Brave, so make sure you select your browser, Chrome I believe; the chromedriver (ver. 102) stays the same (or depends on your Chrome version).
I've also got the Price and Date and stored them in a tuple.
Every record is stored as a list [Title, propertyType, tuples of (Price, Date)].
At the end, it creates a CSV and stores everything inside with ";" as the delimiter.
You can, if you prefer, split the price and date for later use; up to you.
Note: this looping method only applies to websites where the page number is included in the URL. In this case, both the search key and the page number are included in the URL.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
import time
import random
import itertools
options = Options()
options.binary_location = r'C:\Program Files\BraveSoftware\Brave-Browser\Application\brave.exe'
driver = webdriver.Chrome(options = options, service = Service("chromedriver.exe"))
key_word = "BR1".lower()
base_url = f"https://www.rightmove.co.uk/house-prices/{key_word}.html?page=1"
driver.get(base_url)
#Number of pages
pages = driver.find_element(By.XPATH, '//span[@class="pagination-label"][2]').text
pages = int(pages.strip('of'))
WebDriverWait(driver, 15).until(
EC.presence_of_element_located((By.CLASS_NAME, 'results '))
)
data = []
pc = 0
for p in range(1, pages + 1):
    driver.get(f"https://www.rightmove.co.uk/house-prices/{key_word}.html?page={p}")
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.XPATH, '//div//div[@class="propertyCard"]'))
    )
    propertyCards = driver.find_elements(By.XPATH, '//div//div[@class="propertyCard"]')
    for propertyCard in propertyCards:
        title = propertyCard.find_element(By.CLASS_NAME, 'title').text
        propertyType = propertyCard.find_element(By.CLASS_NAME, 'propertyType').text
        price_list = propertyCard.find_elements(By.CLASS_NAME, 'price')
        date_list = propertyCard.find_elements(By.CLASS_NAME, 'date-sold')
        data.append([title, propertyType])
        for p, d in itertools.zip_longest(price_list, date_list, fillvalue=None):
            try:
                price = p.text
                date = d.text
                data[pc].append((price, date))
            except Exception as e:
                print(e)
        pc += 1
    time.sleep(random.randint(1, 4))

print(data)

with open('rightmove.csv', 'w') as file:
    header = "Title;propertyType;Price_Date\n"
    file.write(header)
    for record in data:
        file.write("{};{};{}\n".format(record[0], record[1], record[2:]))
driver.quit()
You don't have to walk down the DOM element by element; you can just use an XPath or a class name (if it's unique; otherwise an XPath or CSS selector is better) and get the item you are looking for.
Anyway, follow this:
import time
import selenium.webdriver as webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Chrome("/usr/local/bin/chromedriver")
driver.get("https://www.rightmove.co.uk/house-prices.html")
# send query
query = "BR1"
search_bar = driver.find_element(By.XPATH, '//input[@class="searchBox ac_input"]')
search_bar.send_keys(query)
search_bar.send_keys(Keys.ENTER)
# wait to result been loaded
WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.CLASS_NAME, 'propertyCard'))
)
#get amount of pages
pages = driver.find_element(By.XPATH, '//span[@class="pagination-label"][2]').text
pages = int(pages.replace('of ', ''))
data = []
i = 1
while i <= pages:
    WebDriverWait(driver, 10).until(
        EC.element_to_be_clickable((By.XPATH, '//div[contains(text(), "Next")]'))
    ).click()

    # wait page load result
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.XPATH, '//div//div[@class="propertyCard"]'))
    )
    propertyCards = driver.find_elements(By.XPATH, '//div//div[@class="propertyCard"]')

    # loop over result and store data
    for propertyCard in propertyCards:
        title = propertyCard.find_element(By.CLASS_NAME, 'title').text
        propertyType = propertyCard.find_element(By.CLASS_NAME, 'propertyType').text
        data.append((title, propertyType))
    time.sleep(1)
    i += 1
print("you reach the last page")
#get number of results
printf(data)
driver.close()
I use a list of tuples because in your example you want to store 2 items; if you want to store more data, you can use a dict and then convert it into CSV with DictWriter directly. Enjoy.
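For example, a minimal sketch of that dict-plus-DictWriter variant (the field names here are illustrative):

import csv

# data is the list of (title, propertyType) tuples collected above
rows = [{'title': t, 'propertyType': pt} for t, pt in data]
with open('rightmove.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=['title', 'propertyType'])
    writer.writeheader()    # column header row
    writer.writerows(rows)  # one row per scraped property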
I am trying to scrape the contact data of companies from this website:
https://de.statista.com/companydb/suche?idCountry=276&idBranch=0&revenueFrom=-1000000000000000000&revenueTo=1000000000000000000&employeesFrom=0&employeesTo=100000000&sortMethod=revenueDesc&p=4
I can do this with the following code:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
import pandas as pd
import time
from selenium.webdriver.support import expected_conditions as EC
company_list= [] #create empty list
driver = webdriver.Chrome('/Users/rieder/Anaconda3/chromedriver_win32/chromedriver.exe') #define driver
driver.get('https://de.statista.com/companydb/suche?idCountry=276&idBranch=0&revenueFrom=-1000000000000000000&revenueTo=1000000000000000000&employeesFrom=0&employeesTo=100000000&sortMethod=revenueDesc&p=1') # open Website
driver.find_element_by_id("cookiesNotificationConfirm").click(); #accept cookies
driver.find_element_by_xpath("//*[#id='content']/section[3]/div/div/form/div/div[2]/div[2]/table/tr[2]/td[1]/a").click(); #click on the first company namelink
contact_data = WebDriverWait(driver, 20).until(EC.visibility_of_all_elements_located((By.XPATH, "/html/body/div[3]/div[4]/section[6]/div/div[2]/div[2]/div/div"))) #get the contactdata from the company you chose before
for cn in contact_data:
    company_list.append(cn.text)  # this stores the text in the list
driver.back() #navigate to previous site
time.sleep(5) #wait for the pop-up window to appear
driver.find_element_by_xpath("/html/body/div[15]/div[3]/div[3]/div[1]/button[1]").click(), #deny the websites popup
time.sleep(5) #wait for the popup to vanish
driver.find_element_by_xpath("//*[#id='content']/section[3]/div/div/form/div/div[2]/div[2]/table/tr[3]/td[1]/a").click(); #click on the next company namelink
contact_data2 = WebDriverWait(driver, 20).until(EC.visibility_of_all_elements_located((By.XPATH, "/html/body/div[3]/div[4]/section[6]/div/div[2]/div[2]/div/div"))) #get the contactdata from the company you chose before
for cn in contact_data2:
    company_list.append(cn.text)  # this stores the text in the list
print(company_list) #show the list
My Output is this:
['GUTex GmbH\nGerhard-Unland-Str. 1\n26683\nSaterland\nDeutschland', 'Robert Bosch GmbH\nRobert-Bosch-Platz 1\n70839\nGerlingen\nDeutschland']
Problem:
I want my code to do this for the whole list on page 1, then go on to the next page and do it again, until I have, for example, 100 addresses in the list. I would do this with a while loop, but my XPaths for finding the address are too specific, so it would always loop over the same companies.
Thanks a lot in advance.
Try the code below for a one-page data extract. Update the code to iterate over the records on the next pages.
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
company_list= [] #create empty list
driver = webdriver.Chrome() #define driver
driver.get('https://de.statista.com/companydb/suche?idCountry=276&idBranch=0&revenueFrom=-1000000000000000000&revenueTo=1000000000000000000&employeesFrom=0&employeesTo=100000000&sortMethod=revenueDesc&p=1') # open Website
if len(driver.find_elements_by_id("cookiesNotificationConfirm")) > 0:
    driver.find_element_by_id("cookiesNotificationConfirm").click()  # accept cookies

WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//table[@class="zebraTable zebraTable--companies"]//td[1]')))
elementsSize = len(driver.find_elements_by_xpath('//table[@class="zebraTable zebraTable--companies"]//td[1]'))

# To iterate over the company list: click on the company name, capture the address on the navigated page,
# come back to the previous page and repeat the same.
for i in range(elementsSize):
    WebDriverWait(driver, 20).until(
        EC.element_to_be_clickable((By.XPATH, '//table[@class="zebraTable zebraTable--companies"]//td[1]')))
    elements = driver.find_elements_by_xpath('//table[@class="zebraTable zebraTable--companies"]//td[1]/a')
    company_name = elements[i].text
    elements[i].click()  # click on the company name link
    WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH,
        '//*[@id="contactInformation"]//div[@class="companyContactBox"]')))  # wait for the contact data of the chosen company
    contact_data = driver.execute_script("return document.getElementsByClassName('companyContactBox')[0].innerText")
    # print(contact_data)
    company_list.append(company_name + " : " + contact_data)
    driver.back()  # navigate to the previous page
print(company_list)
Thanks to Dilip Meghwal's comment above, I could finish my code:
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
import time
company_list= [] #create empty list
count = 25
chrome_options = webdriver.ChromeOptions()
prefs = {"profile.default_content_setting_values.notifications" : 2}
chrome_options.add_experimental_option("prefs",prefs)
driver = webdriver.Chrome('/Users/rieder/Anaconda3/chromedriver_win32/chromedriver.exe', chrome_options=chrome_options) #define driver
driver.get('https://de.statista.com/companydb/suche?idCountry=276&idBranch=0&revenueFrom=-1000000000000000000&revenueTo=1000000000000000000&employeesFrom=0&employeesTo=100000000&sortMethod=revenueDesc&p=1') # open Website
if len(driver.find_elements_by_id("cookiesNotificationConfirm")) > 0:
    driver.find_element_by_id("cookiesNotificationConfirm").click()  # accept cookies

while len(company_list) < 1000:
    WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//table[@class="zebraTable zebraTable--companies"]//td[1]')))
    elementsSize = len(driver.find_elements_by_xpath('//table[@class="zebraTable zebraTable--companies"]//td[1]'))
    # To iterate over the company list: click on the company name, capture the address on the navigated page,
    # come back to the previous page and repeat the same.
    for i in range(elementsSize):
        WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//table[@class="zebraTable zebraTable--companies"]//td[1]')))
        elements = driver.find_elements_by_xpath('//table[@class="zebraTable zebraTable--companies"]//td[1]/a')
        company_name = elements[i].text
        elements[i].click()  # click on the company name link
        WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="contactInformation"]//div[@class="companyContactBox"]')))  # wait for the contact data of the chosen company
        contact_data = driver.execute_script("return document.getElementsByClassName('companyContactBox')[0].innerText")
        # print(contact_data)
        company_list.append(contact_data)
        driver.back()  # navigate to the previous page
    time.sleep(5)
    driver.find_element_by_xpath("//*[@id='content']/section[3]/div/div/form/div/div[2]/div[2]/div[2]/div/button[2]").click()  # go to the next page
company_list = [w.replace('\n', ', ') for w in company_list]
print(company_list)
df_company_name = pd.DataFrame(company_list, columns =['Name'])
df_company_name.to_excel("company_name.xlsx")
I am trying to get products' images when clicked upon since they are higher resolution. So far, I have this code:
start = soup(d.page_source, 'html.parser') # d is the driver
while start.find('div', {'class': 'gallery-images'}) is None:
    start = soup(d.page_source, 'html.parser')
product_images = [i.find('img', {'alt': 'Mirror Embellished Scuba Skirt'}).src for i in
                  start.find_all('div', {'class': 'gallery-images'})]
However, it raises an error saying NoneType has no attribute src. start.find_all returns a list of all the matching elements nested inside it.
Edit: Webpage:
https://www.michaelkors.com/mirror-embellished-scuba-skirt/_/R-US_MU97EYCBGL
Edit 2:
I have hardcoded the value of alt. The page source my scrape gets doesn't have an alt with this specific value, but even when I remove alt it still doesn't return anything.
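Two things worth knowing here: BeautifulSoup's find() returns None on a miss (hence the NoneType error), and tag attributes are read with subscripting (img['src']), not .src. A minimal defensive sketch, assuming the same class name:

product_images = []
for div in start.find_all('div', {'class': 'gallery-images'}):
    img = div.find('img')  # may be None if the gallery div has no <img>
    if img is not None and img.get('src'):
        product_images.append(img['src'])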
Here is the code. It returns the src values.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
import time  # needed for the time.sleep call below
driver = webdriver.Chrome()
driver.get("https://www.michaelkors.co.uk/")
search_ele=WebDriverWait(driver,20).until(EC.element_to_be_clickable((By.CSS_SELECTOR,'.search-link[title="Search"]')))
search_ele.click()
WebDriverWait(driver,20).until(EC.element_to_be_clickable((By.CSS_SELECTOR,'input#search-box'))).send_keys('mirror-embellished-scuba-skirt')
element=WebDriverWait(driver,20).until(EC.element_to_be_clickable((By.CSS_SELECTOR,'button.search-icon-btn')))
driver.execute_script("arguments[0].click();", element)
WebDriverWait(driver,20).until(EC.element_to_be_clickable((By.CSS_SELECTOR,'a[title="Mirror Embellished Scuba Skirt"]'))).click()
time.sleep(3)
soup=BeautifulSoup(driver.page_source,'html.parser')
product_images=[]
for i in soup.find_all('div', class_='gallery-images'):
    for img in i.select('img[alt="Mirror Embellished Scuba Skirt"]'):
        product_images.append(img['src'])
print(product_images)
Output:
['//michaelkors.scene7.com/is/image/MichaelKors/MU97EYCBGL-0001_1?wid=558&hei=748&op_sharpen=1&resMode=sharp2&qlt=90', '//michaelkors.scene7.com/is/image/MichaelKors/MU97EYCBGL-0001_2?wid=558&hei=748&op_sharpen=1&resMode=sharp2&qlt=90', '//michaelkors.scene7.com/is/image/MichaelKors/MU97EYCBGL-0001_3?wid=558&hei=748&op_sharpen=1&resMode=sharp2&qlt=90', '//michaelkors.scene7.com/is/image/MichaelKors/MU97EYCBGL-0001_1?wid=1300', '//michaelkors.scene7.com/is/image/MichaelKors/MU97EYCBGL-0001_2?wid=1300', '//michaelkors.scene7.com/is/image/MichaelKors/MU97EYCBGL-0001_3?wid=1300']