YouTube Scraping (List of Videos) - Python

I'm trying to get a list of the videos listed on a YouTube channel (i.e. link, title, views, etc.).
Yet my code doesn't return anything. Any help will be appreciated!
from bs4 import BeautifulSoup as bs
import requests
from selenium.webdriver import Chrome
import re
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
address = "https://www.youtube.com/channel/UCepWEz3BW6EMKA4CU-yGDMw/videos"
#driver = webdriver.Chrome('./chromedriver')
#driver.get(address)
#driver.maximize_window()
#body = driver.find_element_by_css_selector('body')
#for i in range(250):
# body.send_keys(Keys.PAGE_DOWN)
# time.sleep(1)
r = requests.get(address)
page = r.text
soup = bs(page, 'html.parser')
result = soup.find_all('div', attrs={"class": 'videoId'})
print(result)
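Note that requests only returns the static HTML, and YouTube renders the video grid with JavaScript, so there is no div with class videoId in that markup and find_all() comes back empty. If all you need are the video IDs, one workaround is to pull them out of the JSON that YouTube embeds in the page source. This is only a sketch and assumes YouTube still embeds "videoId" fields in the initial HTML, which can change at any time:
import re
import requests

address = "https://www.youtube.com/channel/UCepWEz3BW6EMKA4CU-yGDMw/videos"
page = requests.get(address).text

# Video IDs appear inside the embedded JSON as "videoId":"XXXXXXXXXXX"
video_ids = sorted(set(re.findall(r'"videoId":"([\w-]{11})"', page)))
links = ["https://www.youtube.com/watch?v=" + vid for vid in video_ids]
print(links)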

Try it:
from selenium import webdriver

url = "https://www.youtube.com/channel/UCepWEz3BW6EMKA4CU-yGDMw/videos"
browser = webdriver.Firefox()
browser.get(url)

datas = browser.find_elements_by_css_selector(".ytd-grid-renderer")
result = {"title": [], "link": [], "views": []}
for data in datas:
    try:
        title = data.find_element_by_css_selector("#video-title").text
        result["title"].append(title)
    except:
        result["title"].append("")
    try:
        link = data.find_element_by_css_selector("#video-title").get_attribute("href")
        result["link"].append(link)
    except:
        result["link"].append("")
    try:
        views = data.find_element_by_css_selector("#metadata-line .ytd-grid-video-renderer:nth-child(1)").text
        result["views"].append(views)
    except:
        result["views"].append("")

# print(result)
browser.close()
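Note that the find_element(s)_by_* helpers used above were deprecated and later removed in Selenium 4, so on a current install the same lookups need the By API. A minimal sketch with the same selectors:
from selenium import webdriver
from selenium.webdriver.common.by import By

browser = webdriver.Firefox()
browser.get("https://www.youtube.com/channel/UCepWEz3BW6EMKA4CU-yGDMw/videos")

# Same CSS selectors as above, expressed with the Selenium 4 API
for data in browser.find_elements(By.CSS_SELECTOR, ".ytd-grid-renderer"):
    title_el = data.find_element(By.CSS_SELECTOR, "#video-title")
    print(title_el.text, title_el.get_attribute("href"))

browser.close()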

Related

Python Selenium .send_keys() only sending first character of my string

I was trying to automate a post to Facebook using Python Selenium, and it was 90% complete. The only issue is that the string I give is "test," but when Facebook posts, it just sends the first character of "test," which is "t."
This is the code:
#libraries
from selenium import webdriver
from selenium.webdriver.common.by import By
import selenium.webdriver.common.keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import bs4
from bs4 import BeautifulSoup as soup
from urllib.request import Request, urlopen
from time import sleep
import pyautogui

#fetching hashtags
def hashtags(hash_idea):
    url = 'http://best-hashtags.com/hashtag/' + hash_idea
    try:
        req = Request(url, headers={'User-Agent' : 'Mozilla/5.0'})
        page = urlopen(req, timeout=10)
        page_html = page.read()
        page.close()
        page_soup = soup(page_html, 'html.parser')
        result = page_soup.find('div', {'class': 'tag-box tag-box-v3 margin-bottom-40'})
        tags = result.decode()
        start_index = tags.find('#')
        end_index = tags.find('</p1>')
        tags = tags[start_index:end_index]
        return tags
    except:
        print('Something went wrong While Fetching hashtags')

def login(username, password):
    try:
        url = 'https://facebook.com'
        driver.get(url)
        user = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.NAME, 'email')))
        user.send_keys(username)
        pas = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.NAME, 'pass')))
        pas.send_keys(password)
        login_btn = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.NAME, 'login')))
        login_btn.click()
    except:
        print('Something went wrong while login process')

def upload(img_path, caption):
    try:
        btn1 = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[1]/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div/div[2]/div/div/div/div[3]/div/div[2]/div/div/div/div[1]/div/div[1]')))
        btn1.click()
        btn2 = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[1]/div[1]/div/div[4]/div/div/div[1]/div/div[2]/div/div/div/form/div/div[1]/div/div/div/div[3]/div[1]/div[2]/div/div[1]/div/span/div/div/div[1]/div/div/div[1]/i')))
        btn2.click()
        btn3 = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[1]/div[1]/div/div[4]/div/div/div[1]/div/div[2]/div/div/div/form/div/div[1]/div/div/div/div[2]/div[1]/div[2]/div/div[1]/div/div/div/div[1]/div/div/div/div[1]/div/i')))
        btn3.click()
        pyautogui.write(img_path)
        pyautogui.press('enter')
        cap = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[1]/div[1]/div/div[4]/div/div/div[1]/div/div[2]/div/div/div/form/div/div[1]/div/div/div/div[2]/div[1]/div[1]/div[1]/div/div/div[1]')))
        cap.send_keys(caption)
        sleep(5) # this is mandatory while doing some thing with bot
        btn_post = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[1]/div[1]/div/div[4]/div/div/div[1]/div/div[2]/div/div/div/form/div/div[1]/div/div/div/div[3]/div[2]/div/div/div[1]/div')))
        btn_post.click()
    except:
        print('Something Went Wrong While posting the image or video')

if __name__ == "__main__":
    #turn for credentials, driver, and caption
    username = input('username : ')
    password = input('pass : ')
    img_path = 'pic1.jpg'
    hash_idea = 'covid'
    caption = 'test' # if you want to
    caption = caption + '\n' + hashtags(hash_idea)
    driver = webdriver.Firefox(executable_path="C:/Users/Asus/Downloads/Compressed/geckodriver-v0.32.0-win64/geckodriver.exe")
    login(username, password)
    upload(img_path, caption)
I wanted to automate the post with the text I provided in the code.
You can try several alternatives:
1. In the definition of cap, replace presence_of_element_located with element_to_be_clickable.
2. Do what is in 1. and moreover add
cap = ...
cap.clear()
cap.click()
cap.send_keys(caption)
3. Do what is in 1. and moreover use ActionChains
from selenium.webdriver.common.action_chains import ActionChains
actions = ActionChains(driver)
cap = ...
actions.move_to_element(cap)  # move the mouse to the middle of the element
actions.click()
actions.send_keys(caption).perform()
4. If none of these works, then you can always send one character at a time
[cap.send_keys(c) for c in caption]
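If even the per-character loop drops keystrokes, a small pause between characters sometimes helps with React-based inputs such as Facebook's composer. A sketch (the 0.05 second delay is an arbitrary choice):
from time import sleep

def slow_type(element, text, delay=0.05):
    # Send a string one character at a time, pausing between keystrokes
    for ch in text:
        element.send_keys(ch)
        sleep(delay)

# usage: slow_type(cap, caption)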

Python Selenium 'post' button of Facebook is not working

Previously I asked about Python Selenium .send_keys() only sending the first character of my string. That issue has now been resolved, but the post button is no longer functional: it worked while the string problem was present, and it stopped working once that problem was fixed.
This is the previous code (only sending the first character of my string):
#libraries
from selenium import webdriver
from selenium.webdriver.common.by import By
import selenium.webdriver.common.keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import bs4
from bs4 import BeautifulSoup as soup
from urllib.request import Request, urlopen
from time import sleep
import pyautogui

#fetching hashtags
def hashtags(hash_idea):
    url = 'http://best-hashtags.com/hashtag/' + hash_idea
    try:
        req = Request(url, headers={'User-Agent' : 'Mozilla/5.0'})
        page = urlopen(req, timeout=10)
        page_html = page.read()
        page.close()
        page_soup = soup(page_html, 'html.parser')
        result = page_soup.find('div', {'class': 'tag-box tag-box-v3 margin-bottom-40'})
        tags = result.decode()
        start_index = tags.find('#')
        end_index = tags.find('</p1>')
        tags = tags[start_index:end_index]
        return tags
    except:
        print('Something went wrong While Fetching hashtags')

def login(username, password):
    try:
        url = 'https://facebook.com'
        driver.get(url)
        user = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.NAME, 'email')))
        user.send_keys(username)
        pas = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.NAME, 'pass')))
        pas.send_keys(password)
        login_btn = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.NAME, 'login')))
        login_btn.click()
    except:
        print('Something went wrong while login process')

def upload(img_path, caption):
    try:
        btn1 = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[1]/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div/div[2]/div/div/div/div[3]/div/div[2]/div/div/div/div[1]/div/div[1]')))
        btn1.click()
        btn2 = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[1]/div[1]/div/div[4]/div/div/div[1]/div/div[2]/div/div/div/form/div/div[1]/div/div/div/div[3]/div[1]/div[2]/div/div[1]/div/span/div/div/div[1]/div/div/div[1]/i')))
        btn2.click()
        btn3 = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[1]/div[1]/div/div[4]/div/div/div[1]/div/div[2]/div/div/div/form/div/div[1]/div/div/div/div[2]/div[1]/div[2]/div/div[1]/div/div/div/div[1]/div/div/div/div[1]/div/i')))
        btn3.click()
        pyautogui.write(img_path)
        pyautogui.press('enter')
        cap = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[1]/div[1]/div/div[4]/div/div/div[1]/div/div[2]/div/div/div/form/div/div[1]/div/div/div/div[2]/div[1]/div[1]/div[1]/div/div/div[1]')))
        cap.send_keys(caption)
        sleep(5) # this is mandatory while doing some thing with bot
        btn_post = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[1]/div[1]/div/div[4]/div/div/div[1]/div/div[2]/div/div/div/form/div/div[1]/div/div/div/div[3]/div[2]/div/div/div[1]/div')))
        btn_post.click()
    except:
        print('Something Went Wrong While posting the image or video')

if __name__ == "__main__":
    #turn for credentials, driver, and caption
    username = input('username : ')
    password = input('pass : ')
    img_path = 'pic1.jpg'
    hash_idea = 'covid'
    caption = 'test' # if you want to
    caption = caption + '\n' + hashtags(hash_idea)
    driver = webdriver.Firefox(executable_path="C:/Users/Asus/Downloads/Compressed/geckodriver-v0.32.0-win64/geckodriver.exe")
    login(username, password)
    upload(img_path, caption)
This is the new code (string problem fixed, but the 'post' button is not working):
#libraries
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
import bs4
from bs4 import BeautifulSoup as soup
from urllib.request import Request, urlopen
from time import sleep
import pyautogui

#fetching hashtags
def hashtags(hash_idea):
    url = 'http://best-hashtags.com/hashtag/' + hash_idea
    try:
        req = Request(url, headers={'User-Agent' : 'Mozilla/5.0'})
        page = urlopen(req, timeout=10)
        page_html = page.read()
        page.close()
        page_soup = soup(page_html, 'html.parser')
        result = page_soup.find('div', {'class': 'tag-box tag-box-v3 margin-bottom-40'})
        tags = result.decode()
        start_index = tags.find('#')
        end_index = tags.find('</p1>')
        tags = tags[start_index:end_index]
        return tags
    except:
        print('Something went wrong While Fetching hashtags')

def login(username, password):
    try:
        url = 'https://facebook.com'
        driver.get(url)
        user = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.NAME, 'email')))
        user.send_keys(username)
        pas = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.NAME, 'pass')))
        pas.send_keys(password)
        login_btn = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.NAME, 'login')))
        login_btn.click()
    except:
        print('Something went wrong while login process')

def upload(img_path, caption):
    try:
        btn1 = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[1]/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div/div[2]/div/div/div/div[3]/div/div[2]/div/div/div/div[1]/div/div[1]')))
        btn1.click()
        btn2 = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[1]/div[1]/div/div[4]/div/div/div[1]/div/div[2]/div/div/div/form/div/div[1]/div/div/div/div[3]/div[1]/div[2]/div/div[1]/div/span/div/div/div[1]/div/div/div[1]/i')))
        btn2.click()
        btn3 = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[1]/div[1]/div/div[4]/div/div/div[1]/div/div[2]/div/div/div/form/div/div[1]/div/div/div/div[2]/div[1]/div[2]/div/div[1]/div/div/div/div[1]/div/div/div/div[1]/div/i')))
        btn3.click()
        pyautogui.write(img_path)
        pyautogui.press('enter')
        actions = ActionChains(driver)
        cap = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[1]/div[1]/div[1]/div/div[4]/div/div/div[1]/div/div[2]/div/div/div/form/div/div[1]/div/div/div/div[2]/div[1]/div[1]/div[1]/div/div/div[1]')))
        actions.move_to_element(cap)
        actions.click()
        actions.send_keys(caption).perform()
        sleep(5) # this is mandatory while doing some thing with bot
        btn_post = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[1]/div[1]/div[1]/div/div[4]/div/div/div[1]/div/div[2]/div/div/div/form/div/div[1]/div/div/div/div[3]/div[2]/div/div/div[1]/div')))
        btn_post.click()
    except:
        print('Something Went Wrong While posting the image or video')

if __name__ == "__main__":
    #turn for credentials, driver, and caption
    username = input('username : ')
    password = input('pass : ')
    img_path = 'pic1.jpg'
    hash_idea = 'covid'
    caption = 'test' # if you want to
    caption = caption + '\n' + hashtags(hash_idea)
    driver = webdriver.Firefox(executable_path="C:/Users/Asus/Downloads/Compressed/geckodriver-v0.32.0-win64/geckodriver.exe")
    login(username, password)
    upload(img_path, caption)
I wanted to automate the post with the text I provided in the code. I've tried a number of different approaches, but the post button is still not functioning properly. When I execute the script, it gives the error message "Something Went Wrong While posting the image or video." Any helpful suggestion or response would be greatly appreciated.
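One debugging step worth trying when an element is found but .click() has no visible effect is to dispatch the click through JavaScript, and to print the real exception instead of swallowing it in the bare except. A minimal sketch of how the end of upload() could look (the placeholder XPath stands for the same post-button XPath used above):
import traceback

try:
    btn_post = WebDriverWait(driver, 20).until(
        EC.element_to_be_clickable((By.XPATH, '<same post-button XPath as above>'))
    )
    # Fall back to a JavaScript click if the normal click is intercepted or ignored
    driver.execute_script("arguments[0].click();", btn_post)
except Exception:
    traceback.print_exc()  # show the real error rather than a generic message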

Something is wrong with my Google news crawler. Please help

# news_crawling.py
# ----- Search for the keyword 'spent nuclear fuel' -----
import sys, os
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
import selenium
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from datetime import datetime, timedelta
from pandas import DataFrame
import time
from openpyxl.workbook import Workbook

sleep_sec = 0.5
wb = Workbook()

# Enter your User-Agent here.
headers = {'User-Agent' : '________________'}
query = 'spent nuclear fuel'
yesterday = (datetime.today() - timedelta(1)).strftime("%Y.%m.%d")

def news_crawling():
    service = Service(executable_path=ChromeDriverManager().install())
    browser = webdriver.Chrome(service=service)
    print('Launching the browser (automated control)\n')
    news_url = 'https://www.google.com/search?q={0}&tbm=nws&source-news]'.format(query, yesterday)
    browser.get(news_url)
    time.sleep(sleep_sec)
    print('\nStarting the crawl.')

    # Crawl page by page using dynamic control
    news_dict = {}
    idx = 1
    cur_page = 1
    news_num = 1000000

    while True:
        table = browser.find_element("xpath", './/div[@data-hveid="CBAQAA"]')
        li_list = table.find_elements("xpath", './/li[contains(@class="vJOb1e aIfcHf Hw13jc"]')
        area_list = [li.find_element("xpath", './/div[@class="mCBkyc y355M ynAwRc MBeuO nDgy9d"]') for li in li_list]
        for a in area_list[:min(len(area_list), news_num - idx + 1)]:
            n = a.find_element("xpath", './/div[@role="heading"]')
            n_url = n.get_attribute('href')
            try:
                img = a.find_element(By.CSS_SELECTOR, 'img#dimg_').find_element(By.CSS_SELECTOR, 'img')
                img = img.get_attribute('src')
            except:
                img = " "
            news_dict[idx] = {'Title': n.get_attribute('title'),
                              'url': n_url,
                              'thumbnail': img}
            idx += 1
        try:
            next_btn = browser.find_element(By.CSS_SELECTOR, 'a#pnnext')
            next_btn.click()
            cur_page += 1
            # pages = browser.find_element("xpath", '//div[@class="sc_page_inner"]')
            # next_page_url = [p for p in pages.find_elements("xpath", './/a') if p.text == str(cur_page)][0].get_attribute('href')
            pages = browser.find_element("xpath", '//table[@class="fl"]')
            next_page_url = [p for p in pages.find_elements("xpath", './/a') if p.text == str(cur_page)][0].get_attribute('aria-lable')
            browser.get(next_page_url)
            time.sleep(sleep_sec)
        except:
            print('\nClosing the browser.\n' + '=' * 100)
            time.sleep(0.7)
            browser.close()
            break

    # ----- Edits complete up to this point -----
    # Export to an Excel file
    print('Converting to DataFrame\n')
    news_df = DataFrame(news_dict).T
    folder_path = os.getcwd()
    xlsx_file_name = '{}_{}.xlsx'.format(query, yesterday)
    news_df.to_excel(xlsx_file_name, index=False)
    print('Excel file saved | path: {}\\{}\n'.format(folder_path, xlsx_file_name))

news_crawling()
This is my code. I used it on a Korean website and it worked well, but after I modified it for Google search it stopped working.
I want to search for something on Google and then get the news titles into an xlsx file.
Since I previously used it on a Korean website, I changed the part below:
table = browser.find_element("xpath", './/div[@data-hveid="CBAQAA"]')
li_list = table.find_elements("xpath", './/li[contains(@class="vJOb1e aIfcHf Hw13jc"]')
area_list = [li.find_element("xpath", './/div[@class="mCBkyc y355M ynAwRc MBeuO nDgy9d"]') for li in li_list]
When I run the code, it only gives me an empty xlsx file.
Can anyone help with this, please? I would really appreciate it.
Here is one possible solution:
from openpyxl import Workbook
from datetime import datetime
from datetime import timedelta
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException

def get_url(query: str, min_date: str, max_date: str) -> str:
    return f'https://www.google.com/search?q={query}&tbm=nws&source-news&tbs=cdr:1,cd_min:{min_date},cd_max:{max_date}'

options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_experimental_option("excludeSwitches", ["enable-automation", "enable-logging"])
service = Service(executable_path="path/to/your/chromedriver.exe")
driver = webdriver.Chrome(service=service, options=options)
wait = WebDriverWait(driver, 5)

yesterday = (datetime.now() - timedelta(1)).strftime("%m.%d.%Y")
driver.get(get_url('spent nuclear fuel', yesterday, yesterday))

url_locator = (By.CSS_SELECTOR, '#rso a')
title_locator = (By.CSS_SELECTOR, 'a div[role="heading"]')
thumbnail_locator = (By.CSS_SELECTOR, '#rso a>div>div:first-child img')

workbook = Workbook()
worksheet = workbook.active
page = 1

while True:
    print(f'Current page: {page}')
    url_web_elements = wait.until(EC.visibility_of_all_elements_located(url_locator))
    title_web_elements = wait.until(EC.presence_of_all_elements_located(title_locator))
    thumbnail_web_elements = wait.until(EC.visibility_of_all_elements_located(thumbnail_locator))
    titles = [title.text.replace(',', '.') for title in title_web_elements]
    urls = [link.get_attribute('href') for link in url_web_elements]
    thumbnails = [thumbnail.get_attribute('src') for thumbnail in thumbnail_web_elements]
    for data in zip(titles, urls, thumbnails):
        news = {
            'title': data[0],
            'url': data[1],
            'thumbnail': data[2]
        }
        worksheet.append(list(news.values()))
    try:
        page += 1
        wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#pnnext'))).click()
    except TimeoutException:
        break

workbook.save(f'google_news_{yesterday}.xlsx')
driver.quit()
The output is an xlsx file, google_news_11.10.2022.xlsx.
In the get_url function, you can pass a range of dates for which the news will be displayed, for example get_url('spent nuclear fuel', '01.11.2022', '11.11.2022').
You can also save data to csv using this solution:
import csv
from datetime import datetime
from datetime import timedelta
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException

def get_url(query: str, min_date: str, max_date: str) -> str:
    return f'https://www.google.com/search?q={query}&tbm=nws&source-news&tbs=cdr:1,cd_min:{min_date},cd_max:{max_date}'

def save_to_csv(data: list) -> None:
    with open(file='google_news.csv', mode='a', encoding="utf-8") as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow([*data])

options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_experimental_option("excludeSwitches", ["enable-automation", "enable-logging"])
service = Service(executable_path="path/to/your/chromedriver.exe")
driver = webdriver.Chrome(service=service, options=options)
wait = WebDriverWait(driver, 5)

yesterday = (datetime.now() - timedelta(1)).strftime("%m.%d.%Y")
driver.get(get_url('spent nuclear fuel', yesterday, yesterday))

url_locator = (By.CSS_SELECTOR, '#rso a')
title_locator = (By.CSS_SELECTOR, 'a div[role="heading"]')
thumbnail_locator = (By.CSS_SELECTOR, '#rso a>div>div:first-child img')

page = 1

while True:
    print(f'Current page: {page}')
    url_web_elements = wait.until(EC.visibility_of_all_elements_located(url_locator))
    title_web_elements = wait.until(EC.presence_of_all_elements_located(title_locator))
    thumbnail_web_elements = wait.until(EC.visibility_of_all_elements_located(thumbnail_locator))
    titles = [title.text.replace(',', '.') for title in title_web_elements]
    urls = [link.get_attribute('href') for link in url_web_elements]
    thumbnails = [thumbnail.get_attribute('src') for thumbnail in thumbnail_web_elements]
    for data in zip(titles, urls, thumbnails):
        news = {
            'title': data[0],
            'url': data[1],
            'thumbnail': data[2]
        }
        save_to_csv(news.values())
    try:
        page += 1
        wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#pnnext'))).click()
    except TimeoutException:
        break

driver.quit()
The output is a csv file, google_news.csv (base64 thumbnails truncated below):
COP27: nuclear boss doesn't expect surge in waste recycling,https://news.yahoo.com/cop27-nuclear-boss-doesnt-expect-072631885.html,"data:image/jpeg;base64,/9j/4AAQSkZJRg… (truncated)"
UN Nuclear Chief Says Recycling Nuclear Waste 'Difficult ...,https://www.theepochtimes.com/un-nuclear-chief-says-recycling-nuclear-waste-difficult-after-biden-looks-to-fund-reprocessing-projects_4855151.html,"data:image/jpeg;base64,/9j/4AAQSkZJRg… (truncated)"
COP27: UN nuclear chief says radioactive waste recycling is 'difficult' technology,https://www.deccanherald.com/international/world-news-politics/cop27-un-nuclear-chief-says-radioactive-waste-recycling-is-difficult-technology-1161036.html,"data:image/jpeg;base64,/9j/4AAQSkZJRg… (truncated)"
Tested on Python 3.9.10. Used Selenium 4.5.0, openpyxl 3.0.10

How to scrape data from each product page from Aliexpress using python selenium

I am trying to scrape each product page from this website: https://www.aliexpress.com/wholesale?catId=0&initiative_id=SB_20220315022920&SearchText=bluetooth+earphones
In particular, I want to get the comments and the customers' countries (shown in a screenshot in the original post).
The main issue is that my code does not inspect the right elements, and this is what I am struggling with.
First, I tried my scraping on this product:
First, I tried my scraping on this product : https://www.aliexpress.com/item/1005003801507855.html?spm=a2g0o.productlist.0.0.1e951bc72xISfE&algo_pvid=6d3ed61e-f378-43d0-a429-5f6cddf3d6ad&algo_exp_id=6d3ed61e-f378-43d0-a429-5f6cddf3d6ad-8&pdp_ext_f=%7B%22sku_id%22%3A%2212000027213624098%22%7D&pdp_pi=-1%3B40.81%3B-1%3B-1%40salePrice%3BMAD%3Bsearch-mainSearch
Here is my code :
from selenium import webdriver
from selenium.webdriver.common.by import By
from lxml import html
import cssselect
from time import sleep
from itertools import zip_longest
import csv

driver = webdriver.Edge(executable_path=r"C:/Users/OUISSAL/Desktop/wscraping/XEW/scraping/codes/msedgedriver")
url = "https://www.aliexpress.com/item/1005003801507855.html?spm=a2g0o.productlist.0.0.1e951bc72xISfE&algo_pvid=6d3ed61e-f378-43d0-a429-5f6cddf3d6ad&algo_exp_id=6d3ed61e-f378-43d0-a429-5f6cddf3d6ad-8&pdp_ext_f=%7B%22sku_id%22%3A%2212000027213624098%22%7D&pdp_pi=-1%3B40.81%3B-1%3B-1%40salePrice%3BMAD%3Bsearch-mainSearch"

with open("data.csv", "w", encoding="utf-8") as csvfile:
    wr = csv.writer(csvfile)
    wr.writerow(["Comment", "Custumer country"])

driver.get(url)
driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')

review_buttom = driver.find_element_by_xpath('//li[@ae_button_type="tab_feedback"]')
review_buttom.click()

html_source = driver.find_element_by_xpath('//div[@id="transction-feedback"]')
tree = html.fromstring(html_source)
#tree = html.fromstring(driver.page_source)

for rvw in tree.xpath('//div[@class="feedback-item clearfix"]'):
    country = rvw.xpath('//div[@class="user-country"]//b/text()')
    if country:
        country = country[0]
    else:
        country = ''
    print('country:', country)
    comment = rvw.xpath('//dt[@id="buyer-feedback"]//span/text()')
    if comment:
        comment = comment[0]
    else:
        comment = ''
    print('comment:', comment)

driver.close()
Thank you !!
What happens?
There is one main issue: the feedback you are looking for sits inside an iframe, so you won't get your information by calling the elements directly.
How to fix it?
Scroll the element that holds the iframe into view, navigate to the iframe's source, and interact with its pagination to get all the feedback.
Example
from selenium import webdriver
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd

url = 'https://www.aliexpress.com/item/1005003801507855.html?spm=a2g0o.productlist.0.0.1e951bc72xISfE&algo_pvid=6d3ed61e-f378-43d0-a429-5f6cddf3d6ad&algo_exp_id=6d3ed61e-f378-43d0-a429-5f6cddf3d6ad-8&pdp_ext_f=%7B%22sku_id%22%3A%2212000027213624098%22%7D&pdp_pi=-1%3B40.81%3B-1%3B-1%40salePrice%3BMAD%3Bsearch-mainSearch'

driver = webdriver.Chrome(ChromeDriverManager().install())
driver.maximize_window()
driver.get(url)
wait = WebDriverWait(driver, 10)

driver.execute_script("arguments[0].scrollIntoView();", wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '.tab-content'))))
driver.get(wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#product-evaluation'))).get_attribute('src'))

data = []

while True:
    for e in driver.find_elements(By.CSS_SELECTOR, 'div.feedback-item'):
        try:
            country = e.find_element(By.CSS_SELECTOR, '.user-country > b').text
        except:
            country = None
        try:
            comment = e.find_element(By.CSS_SELECTOR, '.buyer-feedback span').text
        except:
            comment = None
        data.append({
            'country': country,
            'comment': comment
        })
    try:
        wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#complex-pager a.ui-pagination-next'))).click()
    except:
        break

pd.DataFrame(data).to_csv('filename.csv', index=False)
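An alternative to loading the iframe's src in a separate navigation is to switch into the frame, which keeps the surrounding page available. A minimal sketch assuming the same #product-evaluation iframe:
# Switch into the feedback iframe instead of navigating to its src
frame = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#product-evaluation')))
driver.switch_to.frame(frame)
feedback_items = driver.find_elements(By.CSS_SELECTOR, 'div.feedback-item')
# ... scrape the items as above ...
driver.switch_to.default_content()  # return to the main document when done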

if or try loop for an element in a page selenium

I am trying to scrape agents' data here. I am able to get the links from the first page. I am using numbered loops because I know the total number of pages. I tried to run this for as long as the "next" page option is there, and I tried both "try" and "if not", but wasn't able to figure it out. Any help is welcome. Here is the code.
from selenium import webdriver
import time
from selenium.common.exceptions import ElementNotVisibleException, NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

options = webdriver.ChromeOptions()
options.add_argument('headless')
driver = webdriver.Chrome('C:/Users/../Downloads/cd79/chromedriver.exe', options=options)
links_total = []
driver.get("https://www.cbp.gov/contact/find-broker-by-port?field_port_location_tid=All&field_port_code_value=")

def first_links():
    initial_data = driver.find_elements_by_tag_name('td')
    for initial in initial_data:
        page_links = initial.find_elements_by_tag_name('a')
        for page in page_links:
            page_link = page.get_attribute("href")
            links_total.append(page_link)
    driver.refresh()
    if driver.find_element_by_partial_link_text('next'):
        next_page = driver.find_element_by_partial_link_text('next')
        next_page.click()
        time.sleep(2)
        new_data = driver.find_elements_by_tag_name('td')
        for new in new_data:
            links = new.find_elements_by_tag_name('a')
            for link in links:
                new_link = link.get_attribute("href")
                links_total.append(new_link)

for i in range(1, 23):
    first_links()

for link in links_total:
    print(link)
A try/except approach would be the better option:
from selenium import webdriver
import time
from selenium.common.exceptions import ElementNotVisibleException, NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

options = webdriver.ChromeOptions()
options.add_argument('headless')
driver = webdriver.Chrome('C:/Users/../Downloads/cd79/chromedriver.exe', options=options)
driver.implicitly_wait(10)
# links_total = []
driver.get("https://www.cbp.gov/contact/find-broker-by-port?field_port_location_tid=All&field_port_code_value=")

def first_links(links_total=[]):
    initial_data = driver.find_elements_by_tag_name('td')
    for initial in initial_data:
        page_links = initial.find_elements_by_tag_name('a')
        for page in page_links:
            page_link = page.get_attribute("href")
            links_total.append(page_link)
    # driver.refresh()
    try:
        next_page = driver.find_element_by_partial_link_text('next')
        next_page.click()
        time.sleep(2)
        first_links(links_total)
    except (TimeoutError, ElementNotVisibleException, NoSuchElementException):
        print("NEXT btn not found : ")
        pass
    return links_total

all_links = first_links()
for link in all_links:
    print(link)
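If you prefer the "if" style from the question, note that find_elements (plural) returns an empty list instead of raising an exception, so presence can be checked without try/except. A sketch using the same Selenium 3 style API as the code above:
# find_elements never raises; an empty list means the link is not on the page
next_links = driver.find_elements_by_partial_link_text('next')
if next_links:
    next_links[0].click()
    time.sleep(2)
else:
    print("NEXT btn not found")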
You don't actually need to use Selenium. You could do it with BeautifulSoup like so:
import requests
from bs4 import BeautifulSoup

page_num = 0
url_cbp = r"https://www.cbp.gov/contact/find-broker-by-port?field_port_location_tid=All&field_port_code_value=&page={}"

def get_links(links_total=[], page_num=0):
    page = requests.get(url_cbp.format(page_num))
    soup = BeautifulSoup(page.content, 'html.parser')
    results = soup.find(id='region-content')
    table_cells = results.find_all('td', class_='views-field')
    for cell in table_cells:
        # print(cell)
        # print('\n\n')
        cell_link = cell.find('a')
        page_link = cell_link["href"]
        links_total.append(page_link)
    next_page = results.find('li', class_='pager-next')
    if next_page:
        page_num += 1
        get_links(links_total, page_num)
    return links_total

all_links = get_links()
for link in all_links:
    print(link)
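Since the url_cbp template above already exposes the page query parameter, the recursion can also be written as a plain loop, which avoids growing the call stack on sites with many pages. A sketch reusing the same imports and selectors:
def get_links_iterative():
    links_total = []
    page_num = 0
    while True:
        page = requests.get(url_cbp.format(page_num))
        soup = BeautifulSoup(page.content, 'html.parser')
        results = soup.find(id='region-content')
        for cell in results.find_all('td', class_='views-field'):
            cell_link = cell.find('a')
            if cell_link:
                links_total.append(cell_link["href"])
        # Stop when there is no "next" pager item on the current page
        if not results.find('li', class_='pager-next'):
            break
        page_num += 1
    return links_total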

Categories

Resources