I am trying to get movie reviews from the Fandango website. Even when I hit the URL for the second page (or later pages) of reviews for a particular movie, I keep getting the first page. Do I need to send cookies with the requests?
Below is my code snippet:
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen

baseUrl = 'https://www.fandango.com/movie-reviews'
req = Request(baseUrl, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
soup = BeautifulSoup(webpage, 'html.parser')

# Getting all the movie links from the first page
movieLinks = soup.find_all("a", class_='dark')

# Get reviews for every movie
for i in range(2):  # len(movieLinks)
    try:
        movieName = movieLinks[i].text.replace(' Review', '')
        count = 1
        print('\n\n****** ' + movieName + ' ********\n\n')
        # Getting reviews from the first 3 pages
        for j in range(3):
            pageNum = j + 1
            movieReviewUrl = movieLinks[i]['href'] + '?pn=' + str(pageNum)
            print('Hitting URL: ' + movieReviewUrl)
            revReq = Request(movieReviewUrl, headers={'User-Agent': 'Mozilla/5.0'})
            revWebpage = urlopen(revReq).read()
            revSoup = BeautifulSoup(revWebpage, 'html.parser')
            revArr = revSoup.find_all("p", class_="fan-reviews__item-content")
            for k in range(len(revArr)):
                if len(revArr[k]) > 0:
                    print(str(count) + ' : ' + revArr[k].text)
                    count = count + 1
    except Exception:
        print('Error for movie: ' + movieName)
I suggest using Requests; it makes this kind of request much easier to handle.
from bs4 import BeautifulSoup
import requests

baseUrl = 'https://www.fandango.com/movie-reviews'
# req = Request(baseUrl, headers={'User-Agent': 'Mozilla/5.0'})
webpage = requests.get(baseUrl).text
soup = BeautifulSoup(webpage, 'html.parser')

# Getting all the movie links from the first page
movieLinks = soup.find_all("a", class_='dark')

# Get reviews for every movie
for i in range(2):  # len(movieLinks)
    try:
        movieName = movieLinks[i].text.replace(' Review', '')
        count = 1
        print('\n\n****** ' + movieName + ' ********\n\n')
        # Getting reviews from the first 3 pages
        for j in range(3):
            pageNum = j + 1
            movieReviewUrl = movieLinks[i]['href'] + '?pn=' + str(pageNum)
            print('Hitting URL: ' + movieReviewUrl)
            # revReq = Request(movieReviewUrl, headers={'User-Agent': 'Mozilla/5.0'})
            # revWebpage = urlopen(revReq).read()
            revWebpage = requests.get(movieReviewUrl).text
            revSoup = BeautifulSoup(revWebpage, 'html.parser')
            revArr = revSoup.find_all("p", class_="fan-reviews__item-content")
            print(len(revArr))
            for k in range(len(revArr)):
                if len(revArr[k]) > 0:
                    print(str(count) + ' : ' + revArr[k].text)
                    count = count + 1
    except Exception:
        print('Error for movie: ' + movieName)
When you run it, you can see that revArr comes back empty (length 0), so check whether the "fan-reviews__item-content" class actually exists in the HTML you fetch.
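A quick way to confirm that (just a diagnostic sketch, not part of the fix) is to test whether the class name appears in the raw HTML at all; if it doesn't, the reviews are injected by JavaScript and plain requests/urllib will never see them:

# inside the page loop, right after fetching the page:
revWebpage = requests.get(movieReviewUrl).text
print('fan-reviews__item-content' in revWebpage)  # False means the review markup is not in the server response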
This is my code. It scrapes page by page and writes the extracted data to a CSV file. It takes the next-page link by extracting the anchor tag in the pagination of the current page.
Currently it is slow; can someone please help me make it faster with multithreading or anything else?
import requests
from urllib3.exceptions import InsecureRequestWarning
import csv
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from bs4 import BeautifulSoup as bs

f = csv.writer(open('GEM.csv', 'w', newline=''))
f.writerow(['Bidnumber', 'Items', 'Quantitiy', 'Department', 'Enddate', 'pageNumber'])

def scrap_bid_data():
    page_no = 1
    url = ""
    while page_no <= 532:
        print('Hold on creating URL to fetch data for...' + str(page_no))
        if page_no == 2:
            url = 'https://bidplus.gem.gov.in/bidlists?bidlists&page_no=' + "AMCR24yMNFkfoXF3wKPmGMy_wV8TJPAlxm6oWiTHGOI"
        if page_no == 1:
            url = 'https://bidplus.gem.gov.in/bidlists?bidlists'
        print('URL created: ' + url)
        scraped_data = requests.get(url, verify=False)
        soup_data = bs(scraped_data.text, 'lxml')
        nextlink = soup_data.find('a', {'rel': 'next'})
        nxt = nextlink['href'].split('=')[1]
        extracted_data = soup_data.find('div', {'id': 'pagi_content'})
        if len(extracted_data) == 0:
            break
        else:
            for idx in range(len(extracted_data)):
                if (idx % 2 == 1):
                    bid_data = extracted_data.contents[idx].text.strip().split('\n')
                    if (len(bid_data) > 1):
                        print(page_no)
                        if (len(bid_data[8]) > 1 and len(bid_data[10].split(':')) > 1):
                            bidno = bid_data[0].split(":")[-1]
                            items = bid_data[9].strip().split('Items:')[-1]
                            qnty = int(bid_data[10].split(':')[1].strip())
                            dept = (bid_data[11] + bid_data[16].strip()).split(":")[-1]
                            edate = bid_data[21].split("End Date:")[-1]
                            f.writerow([bidno, items, qnty, dept, edate, page_no])
            page_no = page_no + 1
            url = 'https://bidplus.gem.gov.in/bidlists?bidlists&page_no=' + nxt
            print('printing the next url')
            print(url)

scrap_bid_data()
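One rough way to speed this up, as a sketch only: the page fetches have to stay sequential, because each next-page URL comes from the previous page's next link, but the BeautifulSoup parsing and row extraction can be handed to a thread pool so it overlaps with the downloads. The parse_page helper below is hypothetical and would hold the same per-bid extraction as above.

import csv
import requests
from urllib3.exceptions import InsecureRequestWarning
from bs4 import BeautifulSoup as bs, SoupStrainer
from concurrent.futures import ThreadPoolExecutor

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

def parse_page(html, page_no):
    """Hypothetical helper: return the CSV rows found on one listing page."""
    soup = bs(html, 'lxml')
    rows = []
    content = soup.find('div', {'id': 'pagi_content'})
    if content is not None:
        # ... same per-bid extraction as above, appending
        # [bidno, items, qnty, dept, edate, page_no] to rows ...
        pass
    return rows

def scrap_bid_data(max_pages=532):
    session = requests.Session()  # reuse one connection for all requests
    url = 'https://bidplus.gem.gov.in/bidlists?bidlists'
    futures = []
    with ThreadPoolExecutor(max_workers=4) as pool:
        for page_no in range(1, max_pages + 1):
            resp = session.get(url, verify=False)
            # hand the heavy parsing to a worker thread and keep downloading
            futures.append(pool.submit(parse_page, resp.text, page_no))
            # only parse the anchors here, just to find the next-page token
            links = bs(resp.text, 'lxml', parse_only=SoupStrainer('a'))
            nextlink = links.find('a', {'rel': 'next'})
            if nextlink is None:
                break
            url = 'https://bidplus.gem.gov.in/bidlists?bidlists&page_no=' + nextlink['href'].split('=')[1]
    with open('GEM.csv', 'w', newline='') as fh:
        writer = csv.writer(fh)
        writer.writerow(['Bidnumber', 'Items', 'Quantitiy', 'Department', 'Enddate', 'pageNumber'])
        for fut in futures:
            writer.writerows(fut.result())

scrap_bid_data()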
I am trying to get the product list of a website with Selenium. I prototyped the program and everything worked perfectly, but now that I have built a loop to get all the products, it just gives me the same product 484 times (that is the number of products on the website).
Here is my code:
from bs4 import BeautifulSoup as soup  # HTML data structure
from urllib.request import urlopen as uReq  # Web client
import selenium
from selenium import webdriver

# URL to web scrape from
page_url = "https://www.smythstoys.com/at/de-at/spielzeug/lego/c/SM100114"
driver = webdriver.Chrome()
driver.get(page_url)
buttonName = "loadMoreProducts"
loadMoreButton = driver.find_element_by_id(buttonName)
while loadMoreButton is not None:
    try:
        try:
            loadMoreButton.click()
        except selenium.common.exceptions.ElementNotInteractableException:
            break
    except selenium.common.exceptions.ElementClickInterceptedException:
        break
uClient = uReq(page_url)
page_soup = soup(uClient.read(), "html.parser")
uClient.close()

# gets all products
containers = driver.find_elements_by_tag_name('article')
print(len(containers))

# name the output file to write to local disk
out_filename = "smythstoys_product_data.csv"
# header of csv file to be written
headers = "product_name;price; info \n"
# opens file, and writes headers
f = open(out_filename, "w")
f.write(headers)

# loops through all products
# -----------------------------------------------------------------------
# here is the problem:
for container in driver.find_elements_by_tag_name('article'):
    print("----------------------------------------------------------------------")
    product_name_container = container.find_element_by_xpath("//h2[@class ='prodName trackProduct']")
    product_name = product_name_container.text
    print(product_name)
    price_container = container.find_element_by_xpath("//div[@class ='price']")
    price = price_container.text
    print("price:", price)
    # ------------------------------------------------------------------------------------
    try:
        info_container = container.find_element_by_xpath("//span[@class ='decalImage-right']").text
        print(info_container)
        if not info_container:
            info = "no special type"
            print(info)
            print(info_container)
            f.write(product_name + "; " + price + "; " + info + "\n")
            continue
        if info_container == "https://smyths-at-prod-images.storage.googleapis.com/sys-master/images/hed/h5f/8823589830686" \
                             "/lego-hard-to-find-decal_CE.svg":
            info = "seltenes Set"
        elif info_container == "https://smyths-at-prod-images.storage.googleapis.com/sys-master/images/h41/h70" \
                               "/8823587930142/new-decal_CE%20%281%29.svg":
            info = "neues Set"
        elif info_container == "https://smyths-at-prod-images.storage.googleapis.com/sys-master/images/hde/hae" \
                               "/8871381303326/sale-decal_CE.svg":
            info = "Sale"
        else:
            info = "unknown type" + info_container
        print(info)
        print(info_container)
    except NameError:
        print("no attribute")
        if info_container is None:
            info = "unknown type"
            print(info)
    # writes the dataset to file
    f.write(product_name + "; " + price + "; " + info + "\n")

f.close()  # Close the file
My output is:
LEGO Star Wars 75244 Tantive IV
price: 199,99€
no special type
and that repeated 484 times.
I'm not sure why you used Selenium to get the products when requests can do it smoothly. The following is what you want to do to get all the products using requests.
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

link = "https://www.smythstoys.com/at/de-at/at/de-at/spielzeug/lego/c/SM100114/load-more?"
params = {'q': ':bestsellerRating:productVisible:true', 'page': '1'}

p = 0
while True:
    params['page'] = p
    r = requests.get(link, params=params, headers={
        'content-type': 'application/json; charset=utf-8'
    })
    soup = BeautifulSoup(r.json()['htmlContent'], "lxml")
    if not soup.select_one("a.trackProduct[href]"):
        break
    for item in soup.select("a.trackProduct[href]"):
        product_name = item.select_one("h2.prodName").get_text(strip=True)
        product_price = item.select_one("[itemprop='price']").get("content")
        print(product_name, product_price)
    p += 1
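If you would rather keep the Selenium version from the question, the repeated product is most likely caused by the absolute XPath: "//h2[...]" inside container.find_element_by_xpath searches the whole page rather than that container, so it always returns the first match. A relative ".//" path scopes the lookup to each article, roughly like this:

# relative './/' XPaths keep the search inside the current <article> element
for container in driver.find_elements_by_tag_name('article'):
    product_name = container.find_element_by_xpath(".//h2[@class='prodName trackProduct']").text
    price = container.find_element_by_xpath(".//div[@class='price']").text
    print(product_name, price)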
I am trying to crawl a site's text, but it only crawls 12 articles.
I don't know why it does that, and I am wondering: if I want to crawl the other pages, what should I do?
import requests
from bs4 import BeautifulSoup

x = int(input("start page:"))
while x < int(input("end page:")):
    x = x + 1
    url = "https://www.mmtimes.com/national-news.html?page=" + str(x)
    result = requests.get(url)
    bs_obj = BeautifulSoup(result.content, "html.parser")
    content = bs_obj.find("div", {"class": "msp-three-col"})
    read_more = content.findAll("div", {"class": "read-more"})
    for item in read_more:
        atag = item.find('a')
        link = "https://www.mmtimes.com" + atag["href"]
        linkResult = requests.get(link)
        subpage = BeautifulSoup(linkResult.content, "html.parser")
        fnresult = subpage.find("div", {"class": "field-item even"})
        print(fnresult.text)
print("Total " + str(len(read_more)) + " articles")
Check out the code below; I have made some changes, and it will produce the required output.
import requests
from bs4 import BeautifulSoup

x = int(input("start page:"))
y = input("end page:")
article_count = 0
while x <= int(y):
    url = "https://www.mmtimes.com/national-news.html?page=" + str(x)
    result = requests.get(url)
    bs_obj = BeautifulSoup(result.content, "html.parser")
    content = bs_obj.find("div", {"class": "msp-three-col"})
    read_more = content.findAll("div", {"class": "read-more"})
    for item in read_more:
        atag = item.find('a')
        link = "https://www.mmtimes.com" + atag["href"]
        linkResult = requests.get(link)
        subpage = BeautifulSoup(linkResult.content, "html.parser")
        fnresult = subpage.find("div", {"class": "field-item even"})
        print(fnresult.text)
    article_count += len(read_more)
    print("Total " + str(article_count) + " articles")
    x += 1
I developed this program to scrape Newegg for PS4 prices. However, I want to scrape multiple pages. Here is what I have, but once it scrapes the first page the program stops. Basically, I am trying to change the link so 'Page-1' changes to 2, 3, 4, etc. Is there a better way to do this?
from bs4 import BeautifulSoup
import requests
import csv

page_num = 1
prod_num = 0
source = requests.get('https://www.newegg.com/PS4-Systems/SubCategory/ID-3102/Page-' + str(page_num) + '?PageSize=36&order=BESTMATCH').text
soup = BeautifulSoup(source, 'lxml')

csv_file = open('newegg_scrape.csv', 'w')
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['Product', 'Price', 'Shipping_info'])

for info in soup.find_all('div', class_='item-container'):
    prod = info.find('a', class_='item-title').text.strip()
    price = info.find('li', class_='price-current').text.strip().splitlines()[1].replace(u'\xa0', '')
    if u'$' not in price:
        price = info.find('li', class_='price-current').text.strip().splitlines()[0].replace(u'\xa0', '')
    ship = info.find('li', class_='price-ship').text.strip()
    print(prod)
    print(price)
    print(ship)
    csv_writer.writerow([prod, price, ship])
    prod_num += 1
    if prod_num > 35:  # there are about 35 items per Newegg page
        page_num += 1
    # print(price.splitlines()[1])
    print('-----------')

csv_file.close()
I found the page limit number here, and I think you can get the page limit with XPath or in other ways:
# the XPath syntax may look like this
# //span[@class='list-tool-pagination-text']
Hope it's useful for you.
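A rough sketch of reading the page count from that span with lxml; the "Page 1/36" style of the span's text is an assumption, so adjust the split if the markup differs:

import requests
from lxml import html

url = 'https://www.newegg.com/PS4-Systems/SubCategory/ID-3102/Page-1?PageSize=36&order=BESTMATCH'
tree = html.fromstring(requests.get(url).text)
spans = tree.xpath("//span[@class='list-tool-pagination-text']")
if spans:
    # e.g. "Page 1/36" -> the number after the slash is the last page
    last_page = int(spans[0].text_content().split('/')[-1].strip())
    print('last page:', last_page)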
If you look closely, the "Next" button tag on the last page has the attribute "disabled", so [tag_name].has_attr('disabled') returns True. You can use this to manage the pagination.
import requests
from bs4 import BeautifulSoup
import csv

csv_file = open('newegg_scrape.csv', 'w')
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['Product', 'Price', 'Shipping_info'])

URL_PART1 = "https://www.newegg.com/PS4-Systems/SubCategory/ID-3102/Page-"
URL_PART2 = "?PageSize=36&order=BESTMATCH"
PAGE_NO = 1
url = URL_PART1 + str(PAGE_NO) + URL_PART2

while len(url):
    PAGE_NO += 1
    resp = requests.get(url)
    soup = BeautifulSoup(resp.text, 'html.parser')
    all_divs = soup.find_all('div', attrs={'class': 'item-info'})
    for item in all_divs:
        prod = ""
        price = ""
        ship = ""
        # get product name
        prod = item.find('a', attrs={'class': 'item-title'})
        if prod:
            prod = prod.text.strip()
        # get price
        price_part = item.find('li', attrs={'class': 'price-current'})
        if price_part:
            price_part1 = price_part.strong
            if price_part1:
                price_part1 = price_part1.text.strip()
            price_part2 = price_part.sup
            if price_part2:
                price_part2 = price_part2.text.strip()
            if price_part1 and price_part2:
                price = price_part1 + price_part2
        # get shipping info
        ship = item.find('li', attrs={'class': 'price-ship'})
        if ship:
            ship = ship.text.strip()
        csv_writer.writerow([prod, price, ship])
    # manage pagination
    next_button = soup.find('button', attrs={'title': 'Next'})
    if not next_button.has_attr('disabled'):
        url = URL_PART1 + str(PAGE_NO) + URL_PART2
    else:
        url = ""
My code is showing the first page of hotels. Why isn't it showing more?
import csv
import requests
from bs4 import BeautifulSoup

hotels = []
i = 0
url0 = 'https://www.tripadvisor.com/Hotels-g295424-Dubai_Emirate_of_Dubai-Hotels.html#EATERY_LIST_CONTENTS'
r = requests.get(url0)
data = r.text
soup = BeautifulSoup(r.text, "html.parser")

with open('hotels_Data.csv', 'wb') as file:
    for link in soup.findAll('a', {'property_title'}):
        print('https://www.tripadvisor.com/Hotels-g295424-' + link.get('href'))
        print(link.string)

    for i in range(20):
        while int(i) <= (20):
            i = str(i)
            url1 = 'https://www.tripadvisor.com/Hotels-g295424-oa' + i + '-Dubai_Emirate_of_Dubai-Hotels.html#EATERY_LIST_CONTENTS'
            r1 = requests.get(url1)
            data1 = r1.text
            soup1 = BeautifulSoup(data1, "html.parser")
            for link in soup1.findAll('a', {'property_title', 'price'}):
                print('https://www.tripadvisor.com/Hotels-g294212-' + link.get('href'))
                print(link.string)
            for link in soup.select("a.reference.internal"):
                url1 = link["href"]
                absolute_url = urljoin(base_url, url1)
                print(url1, absolute_url)
            writer = csv.writer(file)
            for row in hotels:
                writer.writerow([s.encode("utf-8") for s in row])
            break
Check the links to the next pages at the bottom of the page: this portal doesn't use page numbers (1, 2, 3, etc.) but offsets (0, 30, 60, 90, etc.), because it displays 30 offers per page.
So you have to use the values 0, 30, 60, 90, etc. in the URL:
"...-oa" + offset + "-Dubai_Emirate..."
You can use e.g. range(0, 250, 30) to get the values 0, 30, 60, 90, and so on.
import requests
from bs4 import BeautifulSoup

for offset in range(0, 250, 30):
    print('--- page offset:', offset, '---')
    url = 'https://www.tripadvisor.com/Hotels-g295424-oa' + str(offset) + '-Dubai_Emirate_of_Dubai-Hotels.html#EATERY_LIST_CONTENTS'
    r = requests.get(url)
    soup = BeautifulSoup(r.text, "html.parser")
    for link in soup.find_all('a', {'property_title'}):
        print(link.text)
But there can be more than 250 offers, so you have to get the link to the last page and use that value instead of 250:
import requests
from bs4 import BeautifulSoup

offset = 0
url = 'https://www.tripadvisor.com/Hotels-g295424-oa' + str(offset) + '-Dubai_Emirate_of_Dubai-Hotels.html#EATERY_LIST_CONTENTS'
r = requests.get(url)
soup = BeautifulSoup(r.text, "html.parser")

for link in soup.find_all('a', {'last'}):
    page_number = link.get('data-page-number')
    last_offset = int(page_number) * 30
    print('last offset:', last_offset)
And then use last_offset+1 in range(0, last_offset+1, 30).
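Putting the two snippets together, a minimal sketch (reusing the same 'last' link and 'property_title' selectors from above):

import requests
from bs4 import BeautifulSoup

base = 'https://www.tripadvisor.com/Hotels-g295424-oa{}-Dubai_Emirate_of_Dubai-Hotels.html#EATERY_LIST_CONTENTS'

# read the last offset from the first page
soup = BeautifulSoup(requests.get(base.format(0)).text, "html.parser")
last_link = soup.find('a', {'last'})
last_offset = int(last_link.get('data-page-number')) * 30 if last_link else 0

# then walk every page, 30 hotels at a time
for offset in range(0, last_offset + 1, 30):
    page = BeautifulSoup(requests.get(base.format(offset)).text, "html.parser")
    for link in page.find_all('a', {'property_title'}):
        print(offset, link.text)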
EDIT: Restaurants use JavaScript and AJAX to load data
import requests
from bs4 import BeautifulSoup
size = 30
# direct url - doesn't have expected information
#url = 'https://www.tripadvisor.com/Restaurants-g187791-Rome_Lazio.html'
# url used by AJAX
url = 'https://www.tripadvisor.com/RestaurantSearch?Action=PAGE&geo=187791&ajax=1&itags=10591&sortOrder=relevance&o=a' + str(size) + '&availSearchEnabled=true&eaterydate=2017_04_27&date=2017-04-28&time=20%3A00%3A00&people=2'
r = requests.get(url)
soup = BeautifulSoup(r.text, "html.parser")
link = soup.find_all('a')[-1]
page_number = link.get('data-page-number')
last_offset = int(page_number) * size # *30
print('last offset:', last_offset)
offset = link.get('data-offset')
offset = int(offset) + size # +30
print('offset:', offset)
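And a rough sketch of walking the restaurant pages with that AJAX URL, driven by the computed last offset; the per-entry parsing is left as a placeholder, since the exact markup of the AJAX response isn't shown here:

import requests
from bs4 import BeautifulSoup

size = 30
ajax_url = ('https://www.tripadvisor.com/RestaurantSearch?Action=PAGE&geo=187791'
            '&ajax=1&itags=10591&sortOrder=relevance&o=a{offset}'
            '&availSearchEnabled=true&eaterydate=2017_04_27&date=2017-04-28'
            '&time=20%3A00%3A00&people=2')

# read the last offset from the first AJAX page, as in the snippet above
first = BeautifulSoup(requests.get(ajax_url.format(offset=size)).text, "html.parser")
last_link = first.find_all('a')[-1]
last_offset = int(last_link.get('data-page-number')) * size

# then request every page by its offset
for offset in range(0, last_offset + 1, size):
    soup = BeautifulSoup(requests.get(ajax_url.format(offset=offset)).text, "html.parser")
    print('offset', offset, '->', len(soup.find_all('a')), 'links in this chunk')
    # ... extract the restaurant entries here ...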