Wrong output in the CSV file using XPath expression - Python

I wrote code to get the following values: "Exam Code", "Exam Name" and "Total Question". The issue is that in the output CSV file I am getting the wrong value in the "Exam Code" column: it is the same value as "Exam Name". The XPath looks fine to me, so I don't know where the issue is.
Following is the code:
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
import time

option = Options()
option.add_argument("--disable-infobars")
option.add_argument("start-maximized")
option.add_argument("--disable-extensions")
option.add_experimental_option("excludeSwitches", ['enable-automation'])
# Pass the argument 1 to allow and 2 to block
# option.add_experimental_option("prefs", {
#     "profile.default_content_setting_values.notifications": 1
# })

driver = webdriver.Chrome(chrome_options=option, executable_path='C:\\Users\\Awais\\Desktop\\web crawling\\chromedriver.exe')

url = ["https://www.marks4sure.com/210-060-exam.html",
       "https://www.marks4sure.com/210-065-exam.html",
       "https://www.marks4sure.com/200-355-exam.html",
       "https://www.marks4sure.com/9A0-127-exam.html",
       "https://www.marks4sure.com/300-470-exam.html",]

driver.implicitly_wait(0.5)
na = "N/A"
# text = 'Note: This exam is available on Demand only. You can Pre-Order this Exam and we will arrange this for you.'

links = []
exam_code = []
exam_name = []
total_q = []

for items in range(0, 5):
    driver.get(url[items])
    # if driver.find_element_by_xpath("//div[contains(@class, 'alert') and contains(@class, 'alert-danger')]") == text:
    #     continue
    items += 1
    try:
        c_url = driver.current_url
        links.append(c_url)
    except:
        pass
    try:
        codes = driver.find_element_by_xpath('''//div[contains(@class, 'col-sm-6') and contains(@class, 'exam-row-data') and position() = 2]''')
        exam_code.append(codes.text)
    except:
        exam_code.append(na)
    try:
        names = driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div[2]/div[3]/div[2]/a')
        exam_name.append(names.text)
    except:
        exam_name.append(na)
    try:
        question = driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div[2]/div[4]/div[2]/strong')
        total_q.append(question.text)
    except:
        total_q.append(na)
    continue

all_info = list(zip(links, exam_name, exam_name, total_q))
print(all_info)

df = pd.DataFrame(all_info, columns=["Links", "Exam Code", "Exam Name", "Total Question"])
df.to_csv("data5.csv", index=False)
driver.close()

You are getting the exam name in there twice, instead of the exam codes, because that's what you are telling it to do (a minor typo: exam_name appears twice):
all_info = list(zip(links, exam_name, exam_name, total_q))
Change it to: all_info = list(zip(links, exam_code, exam_name, total_q))
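For illustration, zip pairs the lists positionally, so whichever list sits in the second position becomes the "Exam Code" column. A minimal sketch with made-up values:
# Hypothetical values, only to show the column mix-up:
links = ["https://www.marks4sure.com/210-060-exam.html"]
exam_code = ["210-060"]
exam_name = ["Some Exam Name"]
total_q = ["70"]

print(list(zip(links, exam_name, exam_name, total_q)))
# -> "Exam Code" and "Exam Name" columns end up identical
print(list(zip(links, exam_code, exam_name, total_q)))
# -> columns line up as intended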
A few things I'm confused about:
1) Why use Selenium? There is no need for it, as the data is returned in the initial request in the HTML source. So I would just use requests, which speeds up the processing.
2) The link and the exam code are already in the URL you are iterating through. I would just split that string or use a regex on it to get the link and the code. You only really need to scrape the exam name and the number of questions.
With that being said, I adjusted it slightly to get just the exam name and number of questions:
import requests
from bs4 import BeautifulSoup
import pandas as pd

urls = ["https://www.marks4sure.com/210-060-exam.html",
        "https://www.marks4sure.com/210-065-exam.html",
        "https://www.marks4sure.com/200-355-exam.html",
        "https://www.marks4sure.com/9A0-127-exam.html",
        "https://www.marks4sure.com/300-470-exam.html",]

links = []
exam_code = []
exam_name = []
total_q = []

for url in urls:
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')

    links.append(url)
    exam_code.append(url.rsplit('-exam')[0].split('/')[-1])

    exam_row = soup.select('div[class*="exam-row-data"]')
    for exam in exam_row:
        if exam.text == 'Exam Name: ':
            exam_name.append(exam.find_next_sibling("div").text)
            continue
        if 'Questions' in exam.text and 'Total Questions' not in exam.text:
            total_q.append(exam.text.strip())
            continue

all_info = list(zip(links, exam_code, exam_name, total_q))
print(all_info)

df = pd.DataFrame(all_info, columns=["Links", "Exam Code", "Exam Name", "Total Question"])
df.to_csv("data5.csv", index=False)

Hi, to get the exam code I think it is better to work with a regex and extract it from the URL itself.
Also, the code below gives me the exam codes correctly except for the 4th link, which has a different structure compared to the others.
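A minimal sketch of that regex idea (the pattern is my assumption based on the URL format, e.g. 210-060 or 9A0-127):
import re

def code_from_url(url):
    # Assumed pattern: the exam code sits between the last "/" and "-exam.html"
    match = re.search(r'/([A-Za-z0-9-]+)-exam\.html', url)
    return match.group(1) if match else "N/A"

print(code_from_url("https://www.marks4sure.com/9A0-127-exam.html"))  # 9A0-127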
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 6 14:48:00 2020
@author: prakh
"""
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
import time

option = Options()
option.add_argument("--disable-infobars")
option.add_argument("start-maximized")
option.add_argument("--disable-extensions")
option.add_experimental_option("excludeSwitches", ['enable-automation'])
# Pass the argument 1 to allow and 2 to block
# option.add_experimental_option("prefs", {
#     "profile.default_content_setting_values.notifications": 1
# })

driver = webdriver.Chrome(executable_path='C:/Users/prakh/Documents/PythonScripts/chromedriver.exe')

url = ["https://www.marks4sure.com/210-060-exam.html",
       "https://www.marks4sure.com/210-065-exam.html",
       "https://www.marks4sure.com/200-355-exam.html",
       "https://www.marks4sure.com/9A0-127-exam.html",
       "https://www.marks4sure.com/300-470-exam.html",]

driver.implicitly_wait(0.5)
na = "N/A"
# text = 'Note: This exam is available on Demand only. You can Pre-Order this Exam and we will arrange this for you.'

links = []
exam_code = []
exam_name = []
total_q = []

for items in range(0, 5):
    driver.get(url[items])
    # if driver.find_element_by_xpath("//div[contains(@class, 'alert') and contains(@class, 'alert-danger')]") == text:
    #     continue
    items += 1
    try:
        c_url = driver.current_url
        links.append(c_url)
    except:
        pass
    try:
        codes = driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div[2]/div[2]/div[2]')
        exam_code.append(codes.text)
    except:
        exam_code.append(na)
    try:
        names = driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div[2]/div[3]/div[2]/a')
        exam_name.append(names.text)
    except:
        exam_name.append(na)
    try:
        question = driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div[2]/div[4]/div[2]/strong')
        total_q.append(question.text)
    except:
        total_q.append(na)
    continue

all_info = list(zip(links, exam_code, exam_name, total_q))
print(all_info)

df = pd.DataFrame(all_info, columns=["Links", "Exam Code", "Exam Name", "Total Question"])
df.to_csv("data5.csv", index=False)
driver.close()

You don't need Selenium, because the page source already contains the info you need without running JavaScript.
Also, most of these pages redirect to marks4sure.com/200-301-exam.html, so you'll get the same results for them. Only marks4sure.com/300-470-exam.html doesn't.
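If you want to verify which URLs redirect, requests records the redirect chain; a quick check you could run before the full script below:
import requests

for u in ["https://www.marks4sure.com/210-060-exam.html",
          "https://www.marks4sure.com/300-470-exam.html"]:
    r = requests.get(u)
    # r.history is non-empty when the request was redirected; r.url is the final URL
    print(u, "->", r.url if r.history else "no redirect")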
import requests
from bs4 import BeautifulSoup

urls = ["https://www.marks4sure.com/210-060-exam.html",
        "https://www.marks4sure.com/210-065-exam.html",
        "https://www.marks4sure.com/200-355-exam.html",
        "https://www.marks4sure.com/9A0-127-exam.html",
        "https://www.marks4sure.com/300-470-exam.html",]

with open("output.csv", "w") as f:
    f.write("exam_code,exam_name,exam_quest\n")
    for url in urls:
        page = requests.get(url)
        soup = BeautifulSoup(page.text, 'html5lib')
        for n, v in enumerate(soup.find_all(class_ = "col-sm-6 exam-row-data")):
            if n == 1:
                exam_code = v.text.strip()
            if n == 3:
                exam_name = v.text.strip()
            if n == 5:
                exam_quest = v.text.strip()
        f.write(f"{exam_code},{exam_name},{exam_quest}\n")

Related

Scraped data is not saving to csv file as it keeps returning a blank csv file

My scraper calls the website, hits each of the 44 pages and creates a CSV file, but the CSV file is empty. I am returning after each of the functions and saving the data to a CSV at the end of the scraper.
Can anyone see what is wrong with my code?
Code:
import pandas,requests,bs4,time
from seleniumwire import webdriver
from webdriver_manager.firefox import GeckoDriverManager
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import datetime

TODAY = datetime.datetime.today().strftime("%Y%m%d")
SAVE_FILENAME = "/Users/180284/jupyter-1.0.0/pssi_jobs-"+TODAY+".csv"

driver = webdriver.Chrome('~/Desktop/chromedriver_mac64')
driver.implicitly_wait(30)

URL_BASE = "https://jobs.pssi.com/us/en/search-resultskeywords=%22food%20safety%20team%20member%22&s=1"
MAX_PAGE = 44
HEADERS = {
    'From': 'myemail'
}

def interceptor(request):
    del request.headers['From']
    request.headers['From'] = HEADERS["From"]

driver.request_interceptor = interceptor

def parse_job_post_div(div_html):
    soup = bs4.BeautifulSoup(div_html)
    job_ls = soup.findAll("div", {"class": "information"})
    job_data = []
    for job in job_ls:
        job_listing = job.find("div", {"class": "information"}).get_text(separator=", ").strip()
        title = job.find("span", {"role": "heading"}).get_text(separator=", ").strip()
        job_location = job.find("p", {"class": "job-info"}).get_text(separator=", ").strip()
        new_row = {"job_listing": job, "title": title, "job_location": job_location}
        job_data.append(new_row)
    return job_data

def get_data(wd):
    job_postings = driver.find_element(By.CLASS_NAME, "information")
    html = job_postings.get_attribute("innerHTML")
    parsed = parse_job_post_div(html)
    return pandas.DataFrame(parsed)

def process_page(url):
    driver.get(url)
    master_data = []
    i = 0
    while True:
        df = get_data(driver)
        master_data.append(df)
        if i == (MAX_PAGE - 1):
            break
        driver.find_element(By.XPATH, "//span[@class='icon icon-arrow-right']").click()
        time.sleep(10)
        print(i)
        i += 1
    return pandas.concat(master_data, ignore_index=True)

data = process_page(URL_BASE)
data.to_csv(SAVE_FILENAME)
I have tried the above code.
The first problem I found in your code is that job_ls is an empty list, i.e. soup.findAll("div",{"class":"information"}) doesn't find anything.
Moreover, job_postings contains only one web element (the first job in the list) instead of all 10 jobs shown on the page, because you used .find_element instead of .find_elements. As a result of these and other problems, process_page(URL_BASE) returns an empty dataframe.
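To illustrate the difference (a minimal sketch, not from the original post; it assumes the driver from the question is already on the results page):
# find_element returns only the first match (or raises NoSuchElementException),
# find_elements returns a list with every match (possibly empty).
first_card = driver.find_element(By.CLASS_NAME, "information")
all_cards = driver.find_elements(By.CLASS_NAME, "information")
print(type(first_card).__name__)   # WebElement
print(len(all_cards))              # e.g. 10, one per job card on the page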
In this case you can speed up the process and use less code by using Selenium directly instead of bs4:
driver.get(URL_BASE)
driver.implicitly_wait(30)
MAX_PAGE = 4

titles, locations, descriptions = [], [], []
for i in range(MAX_PAGE):
    print('current page:', i+1, end='\r')

    titles += [title.text for title in driver.find_elements(By.CSS_SELECTOR, '.information > span[role=heading]')]
    locations += [loc.text.replace('\n', ', ') for loc in driver.find_elements(By.CSS_SELECTOR, '.information > p[class=job-info]')]
    descriptions += [title.text for title in driver.find_elements(By.CSS_SELECTOR, '.information > p[data-ph-at-id=jobdescription-text]')]

    if i < MAX_PAGE-1:
        driver.find_element(By.XPATH, "//span[@class='icon icon-arrow-right']").click()
    else:
        break

df = pandas.DataFrame({'title': titles, 'location': locations, 'description': descriptions})
df.to_csv(SAVE_FILENAME, index=False)
and df will be a DataFrame with one row per job posting, containing the title, location and description columns.

Something is wrong with my Google crawler. Please help

# news_crawling.py
####################################### Search for the 'spent nuclear fuel' keyword ##################################################
import sys, os
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
import selenium
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from datetime import datetime, timedelta
from pandas import DataFrame
import time
from openpyxl.workbook import Workbook

sleep_sec = 0.5
wb = Workbook()

# Enter your User-Agent here.
headers = {'User-Agent' : '________________'}

query = 'spent nuclear fuel'
yesterday = (datetime.today() - timedelta(1)).strftime("%Y.%m.%d")

def news_crawling():
    service = Service(executable_path=ChromeDriverManager().install())
    browser = webdriver.Chrome(service=service)
    print('Launching the browser (automated control)\n')

    news_url = 'https://www.google.com/search?q={0}&tbm=nws&source-news]'.format(query, yesterday)
    browser.get(news_url)
    time.sleep(sleep_sec)

    print('\nStarting the crawl.')

    ##### Crawl page by page using dynamic control
    news_dict = {}
    idx = 1
    cur_page = 1
    news_num = 1000000

    while True:
        table = browser.find_element("xpath", './/div[@data-hveid="CBAQAA"]')
        li_list = table.find_elements("xpath", './/li[contains(@class="vJOb1e aIfcHf Hw13jc"]')
        area_list = [li.find_element("xpath", './/div[@class="mCBkyc y355M ynAwRc MBeuO nDgy9d"]') for li in li_list]

        for a in area_list[:min(len(area_list), news_num-idx+1)]:
            n = a.find_element("xpath", './/div[@role="heading"]')
            n_url = n.get_attribute('href')
            try:
                img = a.find_element(By.CSS_SELECTOR, 'img#dimg_').find_element(By.CSS_SELECTOR, 'img')
                img = img.get_attribute('src')
            except:
                img = " "
            news_dict[idx] = {'Title' : n.get_attribute('title'),
                              'url' : n_url,
                              'thumbnail': img}
            idx += 1

        try:
            next_btn = browser.find_element(By.CSS_SELECTOR, 'a#pnnext')
            next_btn.click()
            cur_page += 1
            # pages = browser.find_element("xpath", '//div[@class="sc_page_inner"]')
            # next_page_url = [p for p in pages.find_elements("xpath", './/a') if p.text == str(cur_page)][0].get_attribute('href')
            pages = browser.find_element("xpath", '//table[@class="fl"]')
            next_page_url = [p for p in pages.find_elements("xpath", './/a') if p.text == str(cur_page)][0].get_attribute('aria-lable')
            browser.get(next_page_url)
            time.sleep(sleep_sec)
        except:
            print('\nClosing the browser.\n' + '=' * 100)
            time.sleep(0.7)
            browser.close()
            break

    ######################################################## Edited up to this point ################################################################
    # Export to an Excel file
    print('Converting to a DataFrame\n')
    news_df = DataFrame(news_dict).T

    folder_path = os.getcwd()
    xlsx_file_name = '{}_{}.xlsx'.format(query, yesterday)
    news_df.to_excel(xlsx_file_name, index=False)
    print('Excel file saved | path: {}\\{}\n'.format(folder_path, xlsx_file_name))

news_crawling()
This is my code. I used it on a Korean website and it worked well. But after I modified it for Google search, it stopped working.
I want to search something on Google and then get the news titles into an xlsx file.
Since I used it on a Korean website before, I changed the part below:
table = browser.find_element("xpath", './/div[@data-hveid="CBAQAA"]')
li_list = table.find_elements("xpath", './/li[contains(@class="vJOb1e aIfcHf Hw13jc"]')
area_list = [li.find_element("xpath", './/div[@class="mCBkyc y355M ynAwRc MBeuO nDgy9d"]') for li in li_list]
When I run the code, it only gives me an empty xlsx file.
Can anyone help with this please? I would really appreciate it.
Here is one possible solution:
from openpyxl import Workbook
from datetime import datetime
from datetime import timedelta
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException

def get_url(query: str, min_date: str, max_date: str) -> str:
    return f'https://www.google.com/search?q={query}&tbm=nws&source-news&tbs=cdr:1,cd_min:{min_date},cd_max:{max_date}'

options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_experimental_option("excludeSwitches", ["enable-automation", "enable-logging"])
service = Service(executable_path="path/to/your/chromedriver.exe")
driver = webdriver.Chrome(service=service, options=options)
wait = WebDriverWait(driver, 5)

yesterday = (datetime.now() - timedelta(1)).strftime("%m.%d.%Y")
driver.get(get_url('spent nuclear fuel', yesterday, yesterday))

url_locator = (By.CSS_SELECTOR, '#rso a')
title_locator = (By.CSS_SELECTOR, 'a div[role="heading"]')
thumbnail_locator = (By.CSS_SELECTOR, '#rso a>div>div:first-child img')

workbook = Workbook()
worksheet = workbook.active

page = 1
while True:
    print(f'Current page: {page}')
    url_web_elements = wait.until(EC.visibility_of_all_elements_located(url_locator))
    title_web_elements = wait.until(EC.presence_of_all_elements_located(title_locator))
    thumbnail_web_elements = wait.until(EC.visibility_of_all_elements_located(thumbnail_locator))
    titles = [title.text.replace(',', '.') for title in title_web_elements]
    urls = [link.get_attribute('href') for link in url_web_elements]
    thumbnails = [thumbnail.get_attribute('src') for thumbnail in thumbnail_web_elements]
    for data in zip(titles, urls, thumbnails):
        news = {
            'title' : data[0],
            'url' : data[1],
            'thumbnail': data[2]
        }
        worksheet.append(list(news.values()))
    try:
        page += 1
        wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#pnnext'))).click()
    except TimeoutException:
        break

workbook.save(f'google_news_{yesterday}.xlsx')
driver.quit()
Output is the xlsx file google_news_11.10.2022.xlsx
In the get_url function, you can pass a range of dates for which the news will be displayed, for example get_url('spent nuclear fuel', '01.11.2022', '11.11.2022').
You can also save data to csv using this solution:
import csv
from datetime import datetime
from datetime import timedelta
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException

def get_url(query: str, min_date: str, max_date: str) -> str:
    return f'https://www.google.com/search?q={query}&tbm=nws&source-news&tbs=cdr:1,cd_min:{min_date},cd_max:{max_date}'

def save_to_csv(data: list) -> None:
    with open(file='google_news.csv', mode='a', encoding="utf-8") as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow([*data])

options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_experimental_option("excludeSwitches", ["enable-automation", "enable-logging"])
service = Service(executable_path="path/to/your/chromedriver.exe")
driver = webdriver.Chrome(service=service, options=options)
wait = WebDriverWait(driver, 5)

yesterday = (datetime.now() - timedelta(1)).strftime("%m.%d.%Y")
driver.get(get_url('spent nuclear fuel', yesterday, yesterday))

url_locator = (By.CSS_SELECTOR, '#rso a')
title_locator = (By.CSS_SELECTOR, 'a div[role="heading"]')
thumbnail_locator = (By.CSS_SELECTOR, '#rso a>div>div:first-child img')

page = 1
while True:
    print(f'Current page: {page}')
    url_web_elements = wait.until(EC.visibility_of_all_elements_located(url_locator))
    title_web_elements = wait.until(EC.presence_of_all_elements_located(title_locator))
    thumbnail_web_elements = wait.until(EC.visibility_of_all_elements_located(thumbnail_locator))
    titles = [title.text.replace(',', '.') for title in title_web_elements]
    urls = [link.get_attribute('href') for link in url_web_elements]
    thumbnails = [thumbnail.get_attribute('src') for thumbnail in thumbnail_web_elements]
    for data in zip(titles, urls, thumbnails):
        news = {
            'title' : data[0],
            'url' : data[1],
            'thumbnail': data[2]
        }
        save_to_csv(news.values())
    try:
        page += 1
        wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#pnnext'))).click()
    except TimeoutException:
        break

driver.quit()
Output is csv file google_news.csv:
COP27: nuclear boss doesn't expect surge in waste recycling,https://news.yahoo.com/cop27-nuclear-boss-doesnt-expect-072631885.html,"data:image/jpeg;base64,/9j/4AAQSkZJRg... (thumbnail data truncated)"
UN Nuclear Chief Says Recycling Nuclear Waste 'Difficult ...,https://www.theepochtimes.com/un-nuclear-chief-says-recycling-nuclear-waste-difficult-after-biden-looks-to-fund-reprocessing-projects_4855151.html,"data:image/jpeg;base64,/9j/4AAQSkZJRg... (thumbnail data truncated)"
COP27: UN nuclear chief says radioactive waste recycling is 'difficult' technology,https://www.deccanherald.com/international/world-news-politics/cop27-un-nuclear-chief-says-radioactive-waste-recycling-is-difficult-technology-1161036.html,"data:image/jpeg;base64,/9j/4AAQSkZJRg... (thumbnail data truncated)"
Tested on Python 3.9.10. Used Selenium 4.5.0, openpyxl 3.0.10

How to run 'implicitly_wait()' in a 'for loop' with respect to web scraping using Python?

Actually, I want to scrape the 'title' and 'product description' for all the products on all the pages, and then save them into a '.csv' file.
URL: https://www.nykaa.com/makeup/body-art/c/3024?page_no=1&sort=popularity&ptype=lst&id=3024&root=nav_2&dir=desc&order=popularity&eq=desktop
This is what I have tried:
from msilib.schema import Error
from os import sep
from tkinter import ON
from turtle import goto
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import numpy as np
from random import randint
import pandas as pd
import requests
import csv

title_list = []
para_list = []
expiry_list = []
country_list = []
importer_list = []
address_list = []

myDict = {'body-art': 3024}

browser = webdriver.Chrome(
    r'C:\Users\paart\.wdm\drivers\chromedriver\win32\97.0.4692.71\chromedriver.exe')
browser.maximize_window()
browser.implicitly_wait(20)

for item_name in myDict:
    page_num = 1
    while True:
        try:
            page = f"https://www.nykaa.com/makeup/{item_name}/c/{myDict[item_name]}?page_no={page_num}&sort=popularity&ptype=lst&id={myDict[item_name]}&root=nav_2&dir=desc&order=popularity&eq=desktop"
            print(page)
            requests.get(page)
            soup = BeautifulSoup(requests.get(page).content, 'html.parser')
            urls = [item.get("href")
                    for item in soup.find_all("a", class_="css-qlopj4")]
            # print(urls)
            if len(urls) == 0:
                break
            for i in range(0, 2):  # Since it's a huge amount of data, I have taken 2 products per page; otherwise it would be range(0, 30) to cover all the products on a page.
                try:
                    url = urls[i]
                    browser.get("https://www.nykaa.com" + url)
                    title_data = browser.find_elements(
                        By.CLASS_NAME, 'css-1gc4x7i').text
                    print(title_data)
                    for t in title_data:
                        title_list.append(t)
                    browser.execute_script("document.body.style.zoom='50%'")
                    browser.execute_script("document.body.style.zoom='100%'")
                    # Creates "load more" button object.
                    browser.implicitly_wait(20)
                    loadMore = browser.find_element(
                        By.XPATH, "/html/body/div[1]/div/div[3]/div[1]/div[2]/div/div/div[2]")
                    loadMore.click()
                    browser.implicitly_wait(20)
                    desc_data = browser.find_elements(By.ID, 'content-details')
                    for desc in desc_data:
                        para_details = browser.find_element(By.XPATH,
                            '//*[@id="content-details"]/p[1]').text
                        para_list.append(para_details)
                        expiry = browser.find_element(By.XPATH,
                            '//*[@id="content-details"]/p[2]').text
                        expiry_list.append(expiry)
                        country = browser.find_element(By.XPATH,
                            '//*[@id="content-details"]/p[3]').text
                        country_list.append(country)
                        importer = browser.find_element(By.XPATH,
                            '//*[@id="content-details"]/p[4]').text
                        importer_list.append(importer)
                        address = browser.find_element(By.XPATH,
                            '//*[@id="content-details"]/p[5]').text
                        address_list.append(address)
                except:
                    break
        except:
            break
        page_num += 1

title_list = [i.split('.css', 1)[0] for i in title_list]

print(*title_list, sep="\n")
print(*para_list, sep="\n")
print(*expiry_list, sep="\n")
print(*country_list, sep="\n")
print(*importer_list, sep="\n")
print(*address_list, "\n")

data_new = {"Title": title_list, "Para": para_list, "Expiry": expiry_list,
            "Country": country_list, "Importer": importer_list, "Address": address_list}
df = pd.DataFrame(data_new)
df.to_csv("nykaa_makeup_bodyArt_new.csv")
# print(df)
The output I am receiving is:
DevTools listening on ws://127.0.0.1:30887/devtools/browser/a222842a-7ce3-4070-a684-7e8bb8772279
https://www.nykaa.com/makeup/body-art/c/3024?page_no=1&sort=popularity&ptype=lst&id=3024&root=nav_2&dir=desc&order=popularity&eq=desktop
https://www.nykaa.com/makeup/body-art/c/3024?page_no=2&sort=popularity&ptype=lst&id=3024&root=nav_2&dir=desc&order=popularity&eq=desktop
https://www.nykaa.com/makeup/body-art/c/3024?page_no=3&sort=popularity&ptype=lst&id=3024&root=nav_2&dir=desc&order=popularity&eq=desktop
https://www.nykaa.com/makeup/body-art/c/3024?page_no=4&sort=popularity&ptype=lst&id=3024&root=nav_2&dir=desc&order=popularity&eq=desktop
https://www.nykaa.com/makeup/body-art/c/3024?page_no=5&sort=popularity&ptype=lst&id=3024&root=nav_2&dir=desc&order=popularity&eq=desktop
PS E:\Web Scraping - Nykaa>
I think that due to the implicitly_wait() function it's not able to fetch the product's title and description. After my code runs, the '.csv' file is created, but it's a blank file. Maybe I am wrong. Please help me with this. Do I need to add or change some parts of the code?
Thanks 🙏🏻
There is no need to set browser.implicitly_wait multiple times.
browser.implicitly_wait sets the timeout for how long the driver will keep polling the DOM to locate an element on the page before it raises an exception.
browser.implicitly_wait is normally set once per driver session.
It is definitely not a pause command like time.sleep.
So, in case you need to put a pause in your code you should use time.sleep, although this is not recommended.
Also, it's much preferable to use Expected Conditions explicit waits rather than browser.implicitly_wait, since browser.implicitly_wait only waits for element presence, i.e. it will continue as soon as the element appears in the DOM, even though it may not be completely rendered yet.
In order to wait for an element to be completely rendered and contain its text you should use something like
wait.until(EC.visibility_of_element_located((By.XPATH, "/html/body/div[1]/div/div[3]/div[1]/div[2]/div/div/div[2]")))
where "/html/body/div[1]/div/div[3]/div[1]/div[2]/div/div/div[2]" is the XPath of the element you wish to get the text from.

My already complete scraper scrapes everything on the page. I would like to limit the scraping to only a certain section

Below is the code of a complete and properly functioning scraper that I own. It successfully scrapes all elements on the page.
However, I would like to scrape only a small, limited section of the page, with the same elements as the current scraping. This limited section is already scraped correctly along with all the other elements of the page, but I would like to scrape only it and not "everything + it". The link is here.
There are 4 tables on the page, but I would like to scrape just one, the table called "Programma", i.e. the HTML section "event-summary event" or "leagues-static event-summary-leagues". And from this section, only the elements of the last round (Matchday 14). Matchday 14 only, not round 15. So obviously, with each update of the page's rounds, the last round should always be the one scraped.
So I would need to insert something that makes the scraper understand that it should download only the elements (which it already scrapes) of that section and of the last round.
The code is already complete and works fine, so I'm not looking for code services, but for a little hint on how to limit the scraping to just the section mentioned above. The scraping is in Selenium. I would like to stick with Selenium and my code as it is already functional and complete. Thanks
import selenium
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Firefox()
driver.get("url")
driver.implicitly_wait(12)
#driver.minimize_window()
wait = WebDriverWait(driver, 10)

all_rows = driver.find_elements(By.CSS_SELECTOR, "div[class^='event__round'],div[class^='event__match']")

current_round = '?'

for bundesliga in all_rows:
    classes = bundesliga.get_attribute('class')
    #print(classes)
    if 'event__round' in classes:
        #round = row.find_elements(By.CSS_SELECTOR, "[class^='event__round event__round--static']")
        #current_round = row.text  # full text `Round 20`
        current_round = bundesliga.text.split(" ")[-1]  # only `20` without `Round`
    else:
        datetime = bundesliga.find_element(By.CSS_SELECTOR, "[class^='event__time']")
        # Split the date and the time
        date, time = datetime.text.split(" ")
        date = date.rstrip('.')  # right-strip to remove `.` at the end of date
        team_home = bundesliga.find_element(By.CSS_SELECTOR, "[class^='event__participant event__participant--home']")
        team_away = bundesliga.find_element(By.CSS_SELECTOR, "[class^='event__participant event__participant--away']")
        score_home = bundesliga.find_element(By.CSS_SELECTOR, "[class^='event__score event__score--home']")
        score_away = bundesliga.find_element(By.CSS_SELECTOR, "[class^='event__score event__score--away']")

        bundesliga = [current_round, date, time, team_home.text, team_away.text, score_home.text, score_away.text]
        bundesliga.append(bundesliga)
        print(bundesliga)
I think all you need to do is limit the all_rows variable. One way to do this is to find the tab you are looking for by its text and then get its parent elements.
import selenium
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException

driver = webdriver.Firefox()
driver.get("https://www.someurl/some/other/page")
driver.implicitly_wait(12)
#driver.minimize_window()
wait = WebDriverWait(driver, 10)

# all_rows = driver.find_elements(By.CSS_SELECTOR, "div[class^='event__round'],div[class^='event__match']")

############### UPDATE ####################
def parent_element(element):
    return element.find_element(By.XPATH, './..')

programma_element = WebDriverWait(driver, 10).until(
    EC.visibility_of_element_located((By.XPATH, "//div[text()='Programma']")))

programma_element_p1 = parent_element(programma_element)
programma_element_p2 = parent_element(programma_element_p1)
programma_element_p3 = parent_element(programma_element_p2)

all_rows = programma_element_p3.find_elements(By.CSS_SELECTOR, "div[class^='event__round'],div[class^='event__match']")

filter_rows = []
for row in all_rows:
    if "event__match--last" in row.get_attribute('class'):
        filter_rows.append(row)
        break
    else:
        filter_rows.append(row)
############### UPDATE ####################

current_round = '?'

for bundesliga in filter_rows:
    classes = bundesliga.get_attribute('class')
    #print(classes)
    if 'event__round' in classes:
        #round = row.find_elements(By.CSS_SELECTOR, "[class^='event__round event__round--static']")
        #current_round = row.text  # full text `Round 20`
        current_round = bundesliga.text.split(" ")[-1]  # only `20` without `Round`
    else:
        datetime = bundesliga.find_element(By.CSS_SELECTOR, "[class^='event__time']")
        # Split the date and the time
        date, time = datetime.text.split(" ")
        date = date.rstrip('.')  # right-strip to remove `.` at the end of date
        team_home = bundesliga.find_element(By.CSS_SELECTOR, "[class^='event__participant event__participant--home']")
        team_away = bundesliga.find_element(By.CSS_SELECTOR, "[class^='event__participant event__participant--away']")
        # score_home = bundesliga.find_element(By.CSS_SELECTOR, "[class^='event__score event__score--home']")
        # score_away = bundesliga.find_element(By.CSS_SELECTOR, "[class^='event__score event__score--away']")
        try:
            score_home = bundesliga.find_element(By.CSS_SELECTOR, "[class^='event__score event__score--home']")
        except (TimeoutException, NoSuchElementException):
            MyObject = type('MyObject', (object,), {})
            score_home = MyObject()
            score_home.text = "-"
        try:
            score_away = bundesliga.find_element(By.CSS_SELECTOR, "[class^='event__score event__score--away']")
        except (TimeoutException, NoSuchElementException):
            MyObject = type('MyObject', (object,), {})
            score_away = MyObject()
            score_away.text = "-"

        bundesliga = [current_round, date, time, team_home.text, team_away.text, score_home.text, score_away.text]
        bundesliga.append(bundesliga)
        print(bundesliga)

Python Scraping from website

I've tried to write a web scraper for https://www.waug.com/area/?idx=15:
#!/usr/bin/env python3
#_*_coding:utf8_*_

import requests
from bs4 import BeautifulSoup

url = requests.get('https://www.abcd.com/area/?abc=15')
html = url.text
soup = BeautifulSoup(html, 'html.parser')

count = 1

names = soup.select('#good_{} > div > div.class_name > div > div'.format(count))
prices = soup.select('#good_{} > div > div.class_name > div.class_name'.format(count))

for name in names:
    while count < 45:
        print(name.text)
        count = count + 1

for price in prices:
    while count < 45:
        print(price.text)
        count = count + 1
The output is only the first item name printed 45 times, and no prices. How can I get all item names and prices? I want to get each item's name and price on the same line. (I've changed the URL and some of the class names, just in case.)
In order to be sure to match the right price with the right title, I'd get the whole "item-good" class.
Then using a for loop ensures that the title I am getting matches its price.
Here's an example of how to parse the website with BeautifulSoup:
#!/usr/bin/env python3
#_*_coding:utf8_*_

import requests
from bs4 import BeautifulSoup

url = requests.get('https://www.waug.com/area/?idx=15')
html = url.text
soup = BeautifulSoup(html, 'html.parser')

count = 1

items = soup.findAll("div", {"class": "item-good"})

for item in items:
    item_title = item.find("div", {"class": "good-title-text"})
    item_price = item.find("div", {"class": "price-selling"})
    print(item_title.text + " " + item_price.text)
    # If you get encoding errors delete the row above and uncomment the one below
    # print(item_title.text.encode("utf-8") + " " + item_price.text.encode("utf-8"))
As per the OP's request this is not enough, because there is a "more" button to push on the webpage in order to retrieve all the results.
This can be done using Selenium WebDriver.
=== IMPORTANT NOTE ===
In order to make this work you'll also need to copy the "chromedriver" file into your script folder.
You can download it from this Google website.
Here's the script:
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys

browser = webdriver.Chrome()
browser.get('https://www.waug.com/area/?idx=15')

for number in range(10):
    try:
        WebDriverWait(browser, 60).until(EC.presence_of_element_located((By.ID, "more_good")))
        more_button = browser.find_element_by_id('more_good')
        more_button.click()
        time.sleep(10)
    except:
        print("Scrolling is now complete!")

source = browser.page_source
# This source variable should be used as input for BeautifulSoup
print(source)
Now it is time to merge the two solutions explained above in order to get the final requested result.
Please keep in mind that this is just a quick'n'dirty hack and needs proper error handling and polishing, but it should be enough to get you started:
#!/usr/bin/env python3
#_*_coding:utf8_*_

from bs4 import BeautifulSoup
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys

browser = webdriver.Chrome()
browser.get('https://www.waug.com/area/?idx=15')

def is_page_load_complete():
    close_button = browser.find_element_by_id('close_good')
    return close_button.is_displayed()

while(True):
    WebDriverWait(browser, 60).until(EC.presence_of_element_located((By.ID, "more_good")))
    time.sleep(10)
    more_button = browser.find_element_by_id('more_good')
    if (more_button.is_displayed()):
        more_button.click()
    else:
        if (is_page_load_complete()):
            break

source = browser.page_source
soup = BeautifulSoup(source, 'html.parser')

items = soup.findAll("div", {"class": "item-good"})

for item in items:
    item_title = item.find("div", {"class": "good-title-text"})
    item_price = item.find("div", {"class": "price-selling"})
    print(item_title.text + " " + item_price.text)
    # If you get encoding errors comment the row above and uncomment the one below
    # print(item_title.text.encode("utf-8") + " " + item_price.text.encode("utf-8"))

print("Total items found: " + str(len(items)))
