How to break a while loop when scraping pages on OpenSea - Python

I'm trying to scrape opensea.io. I have code which scrapes all pages, but I only need the first five pages, so I tried to break the loop, but it doesn't work.
time.sleep(2)  # Allow 2 seconds for the web page to open
data = []
path = ChromeDriverManager().install()
url = 'https://opensea.io/rankings?sortBy=seven_day_volume'
driver = webdriver.Chrome(path)
driver.get(url)
start = time.time()

def scroll():
    """ Get urls from Opensea.io """
    global data
    scroll_pause_time = 1
    screen_height = driver.execute_script("return window.screen.height;")
    i = 1
    num = 0
    while True:
        driver.execute_script("window.scrollTo(0, {screen_height}*{i});".format(screen_height=screen_height, i=i))
        i += 1
        time.sleep(scroll_pause_time)
        main_url = 'https://opensea.io'
        # update the scroll height after each scroll, as it can change while the page loads
        scroll_height = driver.execute_script("return document.body.scrollHeight;")
        soup = BS(driver.page_source, 'html.parser')
        divs = soup.find_all('a', class_='sc-1pie21o-0 elyzfO sc-1xf18x6-0 sc-1twd32i-0 sc-1idymv7-0 dGptxx kKpYwv iLNufV fresnel-lessThan-xl')
        for items in divs:
            link = main_url + items['href']
            print(link)
            d = {'link': link}
            print('Done!')
            data.append(d)
        if screen_height * i > scroll_height:
            el = driver.find_element_by_xpath('//*[@id="main"]/div/div[3]/button[2]').click()
            time.sleep(7)
            scroll()
            num += 1
            if num == 5:
                return

scroll()
print('Done ----> Opensea.io urls')
So, as you can see, I use recursion for this task. I know that mixing a while loop and recursion is not a good idea, but it's the only way I found that scrapes more than one page.

Add a parameter to the function, create a variable called pages outside of the function, and pass it to the function.
Check whether it is less than 5 with an if statement and increment it before each recursive call.
Like below:
time.sleep(2)  # Allow 2 seconds for the web page to open
data = []
path = ChromeDriverManager().install()
url = 'https://opensea.io/rankings?sortBy=seven_day_volume'
driver = webdriver.Chrome(path)
driver.get(url)
start = time.time()

# create a new integer outside of the function to count the number of recursions
pages = 0

# pass it to the function
def scroll(pages):
    """ Get urls from Opensea.io """
    global data
    scroll_pause_time = 1
    screen_height = driver.execute_script("return window.screen.height;")
    i = 1
    while True:
        driver.execute_script("window.scrollTo(0, {screen_height}*{i});".format(screen_height=screen_height, i=i))
        i += 1
        time.sleep(scroll_pause_time)
        main_url = 'https://opensea.io'
        # update the scroll height after each scroll, as it can change while the page loads
        scroll_height = driver.execute_script("return document.body.scrollHeight;")
        soup = BS(driver.page_source, 'html.parser')
        divs = soup.find_all('a', class_='sc-1pie21o-0 elyzfO sc-1xf18x6-0 sc-1twd32i-0 sc-1idymv7-0 dGptxx kKpYwv iLNufV fresnel-lessThan-xl')
        for items in divs:
            link = main_url + items['href']
            print(link)
            d = {'link': link}
            print('Done!')
            data.append(d)
        if screen_height * i > scroll_height and pages < 5:
            el = driver.find_element_by_xpath('//*[@id="main"]/div/div[3]/button[2]').click()
            time.sleep(7)
            # increment it before every recursion
            pages += 1
            scroll(pages)
            return
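If you prefer to avoid mixing recursion with the while loop altogether, here is a minimal sketch (untested against the live site, reusing the same next-page button XPath and link selector from the question) that counts pages inside a single loop and stops after five:
def scroll(max_pages=5):
    """Sketch: collect links from the first max_pages ranking pages with one loop, no recursion."""
    scroll_pause_time = 1
    main_url = 'https://opensea.io'
    screen_height = driver.execute_script("return window.screen.height;")
    pages = 0
    i = 1
    while pages < max_pages:
        driver.execute_script("window.scrollTo(0, {}*{});".format(screen_height, i))
        i += 1
        time.sleep(scroll_pause_time)
        scroll_height = driver.execute_script("return document.body.scrollHeight;")
        soup = BS(driver.page_source, 'html.parser')
        for a in soup.find_all('a', class_='sc-1pie21o-0 elyzfO sc-1xf18x6-0 sc-1twd32i-0 sc-1idymv7-0 dGptxx kKpYwv iLNufV fresnel-lessThan-xl'):
            data.append({'link': main_url + a['href']})
        if screen_height * i > scroll_height:
            # reached the bottom of the current ranking page
            pages += 1
            if pages < max_pages:
                # click the "next page" button (same XPath as in the question) and scroll from the top again
                driver.find_element_by_xpath('//*[@id="main"]/div/div[3]/button[2]').click()
                time.sleep(7)
                i = 1

scroll(5)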

Related

Not scrolling down in a website having dynamic scroll

I'm scraping news articles from a website where a specific category page has no load-more button; the article links are generated as I scroll down. I wrote a function that takes category_page_url and limit_page (how many times I want to scroll down) as input and returns all the links of the news articles displayed on that page.
Category page link = https://www.scmp.com/topics/trade
def get_article_links(url, limit_loading):
    options = webdriver.ChromeOptions()
    lists = ['disable-popup-blocking']
    caps = DesiredCapabilities().CHROME
    caps["pageLoadStrategy"] = "normal"
    options.add_argument("--window-size=1920,1080")
    options.add_argument("--disable-extensions")
    options.add_argument("--disable-notifications")
    options.add_argument("--disable-Advertisement")
    options.add_argument("--disable-popup-blocking")
    driver = webdriver.Chrome(executable_path=r"E:\chromedriver\chromedriver.exe", options=options)  # add your chrome path
    driver.get(url)
    last_height = driver.execute_script("return document.body.scrollHeight")
    loading = 0
    while loading < limit_loading:
        loading += 1
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(8)
        new_height = driver.execute_script("return document.body.scrollHeight")
        if new_height == last_height:
            break
        last_height = new_height
    article_links = []
    bsObj = BeautifulSoup(driver.page_source, 'html.parser')
    for i in bsObj.find('div', {'class': 'content-box'}).find('div', {'class': 'topic-article-container'}).find_all('h2', {'class': 'article__title'}):
        article_links.append(i.a['href'])
    return article_links
Assuming I want to scroll 5 times on this category page:
get_article_links('https://www.scmp.com/topics/trade', 5)
But even if I change limit_page, it only returns the links from the first page, so I must have made a mistake in the scrolling part. Please help me with this.
Instead of scrolling using the body's scrollHeight property, I checked whether there was an appropriate element after the list of articles to scroll to. I noticed this appropriately named div:
<div class="topic-content__load-more-anchor" data-v-db98a5c0=""></div>
Accordingly, I mainly changed the while loop in your function get_article_links to scroll to this div using location_once_scrolled_into_view, finding the div once before the loop starts, as follows:
loading = 0
end_div = driver.find_element('class name', 'topic-content__load-more-anchor')
while loading < limit_loading:
    loading += 1
    print(f'scrolling to page {loading}...')
    end_div.location_once_scrolled_into_view
    time.sleep(2)
If we now call the function with different values of limit_loading, we get a different count of unique news links. Here are a couple of runs:
>>> ar_links = get_article_links('https://www.scmp.com/topics/trade', 2)
>>> len(ar_links)
scrolling to page 1...
scrolling to page 2...
90
>>> ar_links = get_article_links('https://www.scmp.com/topics/trade', 3)
>>> len(ar_links)
scrolling to page 1...
scrolling to page 2...
scrolling to page 3...
120
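Putting it together, a sketch of the full revised function might look like this (same driver setup and parsing as in the question, with only the scrolling loop changed; untested beyond the runs above):
def get_article_links(url, limit_loading):
    # driver setup as in the question
    options = webdriver.ChromeOptions()
    options.add_argument("--window-size=1920,1080")
    options.add_argument("--disable-notifications")
    driver = webdriver.Chrome(executable_path=r"E:\chromedriver\chromedriver.exe", options=options)
    driver.get(url)

    # scroll to the anchor div below the article list instead of relying on scrollHeight
    loading = 0
    end_div = driver.find_element('class name', 'topic-content__load-more-anchor')
    while loading < limit_loading:
        loading += 1
        print(f'scrolling to page {loading}...')
        end_div.location_once_scrolled_into_view  # accessing the property scrolls the element into view
        time.sleep(2)

    # parse the fully loaded page the same way as before
    article_links = []
    bsObj = BeautifulSoup(driver.page_source, 'html.parser')
    for title in bsObj.find('div', {'class': 'content-box'}).find('div', {'class': 'topic-article-container'}).find_all('h2', {'class': 'article__title'}):
        article_links.append(title.a['href'])
    driver.quit()
    return article_links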

How do I scrape a website with an infinite scroller?

The code below is what I have so far, but it only pulls data for the first 25 items, i.e. the items visible on the page before scrolling down for more:
import requests
from bs4 import BeautifulSoup
import time
import pandas as pd
start_time = time.time()
s = requests.Session()
#Get URL and extract content
response = s.get('https://www.linkedin.com/jobs/search?keywords=It%20Business%20Analyst&location=Boston%2C%20Massachusetts%2C%20United%20States&geoId=102380872&trk=public_jobs_jobs-search-bar_search-submit&position=1&pageNum=0')
soup = BeautifulSoup(response.text, 'html.parser')
items = soup.find('ul', {'class': 'jobs-search__results-list'})
job_titles = [i.text.strip('\n ') for i in items.find_all('h3', {'class': 'base-search-card__title'})]
job_companies = [i.text.strip('\n ') for i in items.find_all('h4', {'class': 'base-search-card__subtitle'})]
job_locations = [i.text.strip('\n ') for i in items.find_all('span', {'class': 'job-search-card__location'})]
job_links = [i["href"].strip('\n ') for i in items.find_all('a', {'class': 'base-card__full-link'})]
a = pd.DataFrame({'Job Titles': job_titles})
b = pd.DataFrame({'Job Companies': job_companies})
c = pd.DataFrame({'Job Locations': job_locations})
value_counts1 = a['Job Titles'].value_counts()
value_counts2 = b['Job Companies'].value_counts()
value_counts3 = c['Job Locations'].value_counts()
l1 = [f"{key} - {value_counts1[key]}" for key in value_counts1.keys()]
l2 = [f"{key} - {value_counts2[key]}" for key in value_counts2.keys()]
l3 = [f"{key} - {value_counts3[key]}" for key in value_counts3.keys()]
data = l1, l2, l3
df = pd.DataFrame(
    data, index=['Job Titles', 'Job Companies', 'Job Locations'])
df = df.T
print(df)
print("--- %s seconds ---" % (time.time() - start_time))
I would like to pull data for more than the first 25 items. Is there an efficient way to do this?
Get the container that holds the desired data by inspecting the page; you can then scrape the infinite-scroll page with the Selenium web driver using window.scrollTo().
Check these for more:
crawl site that has infinite scrolling using python
or web-scraping-infinite-scrolling-with-selenium
The best way is to create a function to scroll down:
# Scroll function
# This function takes two arguments: the driver that is being used and a timeout.
# The driver is used to scroll and the timeout is used to wait for the page to load.
def scroll(driver, timeout):
    scroll_pause_time = timeout
    # Get scroll height
    last_height = driver.execute_script("return document.body.scrollHeight")
    while True:
        # Scroll down to bottom
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        # Wait to load page
        time.sleep(scroll_pause_time)
        # Calculate new scroll height and compare with last scroll height
        new_height = driver.execute_script("return document.body.scrollHeight")
        if new_height == last_height:
            # If heights are the same it will exit the function
            break
        last_height = new_height
Then you can use the scroll function to scroll the desired page:
import time
import pandas as pd
from seleniumwire import webdriver
# Create a new instance of the Firefox driver
driver = webdriver.Firefox()
# move to some url
driver.get('your_url')
# use "scroll" function to scroll the page every 5 seconds
scroll(driver, 5)
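After scroll() returns, the fully loaded page source can be parsed exactly as in the question. A minimal sketch (assuming driver was pointed at the LinkedIn search URL from the question; the class names are the ones from the original requests-based code and may differ slightly in the rendered page):
from bs4 import BeautifulSoup

# once scroll() finishes, driver.page_source contains all items loaded so far
soup = BeautifulSoup(driver.page_source, 'html.parser')
items = soup.find('ul', {'class': 'jobs-search__results-list'})
job_titles = [i.text.strip('\n ') for i in items.find_all('h3', {'class': 'base-search-card__title'})]
print(len(job_titles))  # should now be more than the initial 25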

BeautifulSoup not scraping next pages

driver = webdriver.Chrome(ChromeDriverManager().install())
url = 'www.mywebsite.com'
driver.get(url)
response = requests.get(url)
markup = driver.page_source
soup = BeautifulSoup(markup, 'lxml')
for _ in range(50):
    driver.find_element_by_tag_name('body').send_keys(Keys.END)  # Move the page down
    element = driver.find_element_by_class_name('prevnext')
    master_list = []
    for name in soup.find_all(itemprop='name'):
        data_dict = {}
        data_dict['company name'] = name.get_text(strip=True, separator='\n')
        master_list.append(data_dict)
    df = pd.DataFrame(master_list)
    print('Page scraped')
    time.sleep(5)
    print('Sleeping for 2..')
    print('Is the button enabled : ' + str(element.is_enabled()))
    print('Is the button visible : ' + str(element.is_displayed()))
    element.click()
    print('Clicked Next')
    driver.implicitly_wait(2)
    # # for _ in range(1):
    # #     print('waiting 10')
    # driver.find_element_by_class_name('submit-btn').click()
print('Finished Scraping')
I need this to run through 50 pages. It scrapes the first one and flips through the others. However, at the end only the first page is scraped and added to df. Every page has 20 records. I believe my indentation is wrong. Any help appreciated.
It seems you made a small mistake here:
markup = driver.page_source
soup = BeautifulSoup(markup, 'lxml')
Remove these lines from where they are and put them at the start of your for loop: you need to get the page source again on every iteration, because new content is loaded each time you click.
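In other words, the loop would look roughly like this (a sketch of just that fix, keeping the selectors and calls from your own code):
master_list = []
for _ in range(50):
    driver.find_element_by_tag_name('body').send_keys(Keys.END)  # move the page down
    element = driver.find_element_by_class_name('prevnext')

    # re-read the page source on every iteration, after the new content has loaded
    soup = BeautifulSoup(driver.page_source, 'lxml')
    for name in soup.find_all(itemprop='name'):
        master_list.append({'company name': name.get_text(strip=True, separator='\n')})

    element.click()
    time.sleep(5)

df = pd.DataFrame(master_list)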

Scraping all comments under an Instagram post

My previous question was closed, but the suggested answer doesn't help me. Instagram comments have very specific behaviour! I know how to programmatically scroll a website down, but with comments on Instagram it is a bit different. I would appreciate it if my question was not closed immediately, because that really doesn't help. I would be grateful for help rather than being shut down. Thank you.
Here it is again:
I am trying to build a scraper that saves the comments under an Instagram post. I manage to log in to Instagram through my code so I can access all comments under a post, but I can't seem to scroll down enough times to view all the comments and scrape them. I only get around 20 comments every time.
Can anyone please help me? I am using selenium webdriver.
Thank you for your help in advance! I will be grateful.
This is my function for saving the comments:
import time
from selenium.webdriver.firefox.options import Options
from selenium.webdriver import Firefox
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys

class Instagram_comments():
    def __init__(self):
        self.firefox_options = Options()
        self.browser = Firefox(options=self.firefox_options)

    def get_comments(self, url):
        self.browser.get(url)
        time.sleep(3)
        while True:
            try:
                self.load_more_comments = self.browser.find_element_by_class_name(
                    'glyphsSpriteCircle_add__outline__24__grey_9')
                self.action = ActionChains(self.browser)
                self.action.move_to_element(self.load_more_comments)
                self.load_more_comments.click()
                time.sleep(4)
                self.body_elem = self.browser.find_element_by_class_name('Mr508')
                for _ in range(100):
                    self.body_elem.send_keys(Keys.END)
                    time.sleep(3)
            except Exception as e:
                pass
            time.sleep(5)
            self.comment = self.browser.find_elements_by_class_name('gElp9 ')
            for c in self.comment:
                self.container = c.find_element_by_class_name('C4VMK')
                self.name = self.container.find_element_by_class_name('_6lAjh').text
                self.content = self.container.find_element_by_tag_name('span').text
                self.content = self.content.replace('\n', ' ').strip().rstrip()
                self.time_of_post = self.browser.find_element_by_xpath('//a/time').get_attribute("datetime")
                self.comment_details = {'profile name': self.name, 'comment': self.content, 'time': self.time_of_post}
                print(self.comment_details)
            time.sleep(5)
            return self.comment_details
This chunk worked multiple times for me:
def scroll():
    SCROLL_PAUSE_TIME = 1
    # Get scroll height
    last_height = driver.execute_script("return document.body.scrollHeight")
    while True:
        # Scroll down to bottom
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        # Wait to load page
        time.sleep(SCROLL_PAUSE_TIME)
        # Calculate new scroll height and compare with last scroll height
        new_height = driver.execute_script("return document.body.scrollHeight")
        if new_height == last_height:
            break
        print('page height:', new_height)
        last_height = new_height

scroll()
For some sites (such as Twitter) you will need to scrape as you scroll, since not all elements remain in the page when you reach the bottom.
This is what my code looked like for Twitter:
account_names = []
account_tags = []
account_link = []

def scroll():
    SCROLL_PAUSE_TIME = 1
    global account_name
    # Get scroll height
    last_height = driver.execute_script("return document.body.scrollHeight")
    while True:
        # Scroll down to bottom
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        # Wait to load page
        time.sleep(SCROLL_PAUSE_TIME)
        account_name = driver.find_elements_by_xpath('//*[@id="react-root"]/div/div/div/main/div/div/div/div/div/div[2]/div/div/section/div/div/div/div/div/div/div/div[2]/div[1]/div[1]/a/div/div[1]/div[1]/span/span')
        for act_name in account_name:
            global acctname
            acctname = act_name.text
            account_names.append(acctname)
        account_handle = driver.find_elements_by_xpath('//*[@id="react-root"]/div/div/div/main/div/div/div/div/div/div[2]/div/div/section/div/div/div/div/div/div/div/div[2]/div[1]/div[1]/a/div/div[2]/div/span')
        for act_handle in account_handle:
            global account_tags
            acct_handles = act_handle.text
            account_tags.append(acct_handles)
        soup = BeautifulSoup(driver.page_source, 'lxml')
        account_links = soup.find_all('a', href=True, class_='css-4rbku5 css-18t94o4 css-1dbjc4n r-1loqt21 r-1wbh5a2 r-dnmrzs r-1ny4l3l')
        for acct_links in account_links:
            global act_link
            act_link = acct_links['href']
            account_link.append(act_link)
        # Calculate new scroll height and compare with last scroll height
        new_height = driver.execute_script("return document.body.scrollHeight")
        if new_height == last_height:
            break
        last_height = new_height

scroll()
Just a note: as another user commented, Instagram is very difficult to scrape because of its dynamically generated HTML class names, so they are right that nobody, myself included, is too keen to write that for Instagram.
The first function below returns True if an element is found.
The second function scrolls down to the last comment by repeatedly clicking the load-more button, and then BeautifulSoup is used to scrape all comments.
def check_exists_by_xpath(self, xpath):
    try:
        self.driver.find_element_by_xpath(xpath)
    except NoSuchElementException:
        return False
    return True

def get_comments(self):
    while self.check_exists_by_xpath("//div/ul/li/div/button"):
        load_more_comments_element = self.driver.find_element_by_xpath("//div/ul/li/div/button")
        load_more_comments_element.click()
        sleep(1)
    sleep(2)
    soup = BeautifulSoup(self.driver.page_source, 'lxml')
    comms = soup.find_all('div', attrs={'class': 'C4VMK'})
    print(len(comms))
    soup_2 = BeautifulSoup(str(comms), 'lxml')
    spans = soup_2.find_all('span')
    comments = [i.text.strip() for i in spans if i != '']
    print(comments)
I hope this helps - be aware I'm also still learning.
This worked for me; it programmatically clicks the "load more" button as many times as it is displayed.
try:
    load_more_comment = driver.find_element_by_css_selector('.MGdpg > button:nth-child(1)')
    print("Found {}".format(str(load_more_comment)))
    while load_more_comment.is_displayed():
        load_more_comment.click()
        time.sleep(1.5)
        load_more_comment = driver.find_element_by_css_selector('.MGdpg > button:nth-child(1)')
        print("Found {}".format(str(load_more_comment)))
except Exception as e:
    print(e)
    pass

Selenium: scroll down the page and parse with Python

I am trying to parse the page ozon.ru and I have a problem.
I need to scroll the page and then get all of its HTML.
But when I scroll, the height changes, yet the parsing result is wrong: it only returns results from the first page.
I can't figure out how to refresh the page's HTML. How can I do that?
def get_link_product_ozon(url):
    chromedriver = "chromedriver"
    os.environ["webdriver.chrome.driver"] = chromedriver
    driver = webdriver.Chrome(chromedriver)
    driver.get(url)
    i = 0
    last_height = driver.execute_script("return document.body.scrollHeight")
    while i < 80:
        try:
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(3)
            new_height = driver.execute_script("return document.body.scrollHeight")
            i += 1
            last_height = new_height
        except:
            time.sleep(3)
            continue
    soup = BeautifulSoup(driver.page_source, "lxml")
    all_links = soup.findAll('div', class_='bOneTile inline jsUpdateLink mRuble ')
    for link in all_links:
        print(link.attrs['data-href'])
    driver.close()
The divs loaded after scrolling don't have the class mRuble, and you are doing exact string matching on the class attribute. Maybe try something like:
all_links = soup.select('div.bOneTile.inline.jsUpdateLink')
all_links = soup.select('div[data-href]')
...
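For example, dropping the first selector into the parsing step of the original function (a sketch; the class names on ozon.ru may of course have changed since):
soup = BeautifulSoup(driver.page_source, "lxml")
# match on the stable classes only, instead of the exact class string
all_links = soup.select('div.bOneTile.inline.jsUpdateLink')
for link in all_links:
    print(link.attrs['data-href'])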
