Iterate and extract info over div class - python

import requests
from bs4 import BeautifulSoup

url = "https://boulder.noshdelivery.co/restaurants"
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")

restaurant_wrapper = soup.find(class_="dd_rest_list")
restaurants = restaurant_wrapper.find_all(class_="menu__vendor-name")
restaurant_street_address = restaurant_wrapper.find("span", itemprop="streetAddress")
restaurant_address_locality = restaurant_wrapper.find("span", itemprop="addressLocality")

def extract_restaurant_data(restaurant):
    restaurant_title = restaurant_wrapper.find(class_="menu__vendor-name")
    return {
        "title": restaurant_title.text.strip(),
        "streetAddress": restaurant_street_address.text.strip(),
        "addressLocality": restaurant_address_locality.text.strip()
    }

results = [extract_restaurant_data(restaurant) for restaurant in restaurants]
print(results)
I would like to know why this code prints exactly the same info for every entry and does not iterate over the list of restaurants.
My output is this:
{'title': '5280 Cafe At Rallysport', 'streetAddress': '2727 29th St.', 'addressLocality': 'Boulder'},
{'title': '5280 Cafe At Rallysport', 'streetAddress': '2727 29th St.', 'addressLocality': 'Boulder'}...
The info is the same. I do not know why my code does not iterate over the different names from the list of "restaurants"

You only did one find for each piece of data. Do a find_all on each section and then zip the results together!
restaurant_details = zip(
    restaurant_wrapper.find_all(class_="menu__vendor-name"),
    restaurant_wrapper.find_all("span", itemprop="streetAddress"),
    restaurant_wrapper.find_all("span", itemprop="addressLocality"),
)

results = [
    {
        "title": title.text.strip(),
        "streetAddress": street_address.text.strip(),
        "addressLocality": address_locality.text.strip()
    }
    for title, street_address, address_locality in restaurant_details
]

print(results)
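One thing to keep in mind with this approach: zip stops at the shortest of its inputs, so if any restaurant is missing a street address or locality, the three lists fall out of step and the remaining values will silently pair up with the wrong restaurants.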

Your function has restaurant_wrapper.find(class_="menu__vendor-name") written in it, so each time it runs it returns only the first occurrence of the class menu__vendor-name.
To print a new restaurant's details on each iteration, you have to access each web element individually.
The code below would allow you to get the details for all restaurants.
restwords = restaurant_wrapper.find_all("div", {"class": "dd_restwords"})

def extract_restaurant_data(restaurant):
    title = restaurant.find("div", {"class": "menu__vendor-name"}).text
    streetAddress = restaurant.find("span", {"itemprop": "streetAddress"}).text
    addressLocality = restaurant.find("span", {"itemprop": "addressLocality"}).text
    rest_data = {
        "title": title,
        "streetAddress": streetAddress,
        "addressLocality": addressLocality
    }
    return rest_data

for restaurant in restwords:
    print(extract_restaurant_data(restaurant))
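Unlike the zip approach above, this version scopes each find to a single restaurant's div, so a record's fields stay together even if some restaurant is missing one of them.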

Related

How to fix this error during scraping using BeautifulSoup?

I am trying to do web scraping using the BeautifulSoup and requests Python libraries. I want to filter the news titles from the Hacker News website, but it's showing an error when I run it.
import requests
from bs4 import BeautifulSoup

res = requests.get('https://news.ycombinator.com/news')
soup = BeautifulSoup(res.text, 'html.parser')
links = soup.select('.titleline a')
subtext = soup.select('.subtext')

def create_custom_hn(links, subtext):
    hn = []
    for index, item in enumerate(links):
        title = links[index].getText()
        href = links[index].get('href', None)
        votes = subtext[index].select('.score')
        if len(votes):
            points = int(votes[0].getText().replace(' points', ''))
            print(points)
        hn.append({'title': title, 'href': href})
    return hn

print(create_custom_hn(links, subtext))
The error says
votes = subtext[index].select('.score')
~~~~~~~^^^^^^^
IndexError: list index out of range
Here is a fixed version of the code from the question (the IndexError happens because '.titleline a' matches two links per story, so links ends up twice as long as subtext):
import requests
from bs4 import BeautifulSoup

res = requests.get("https://news.ycombinator.com/news")
soup = BeautifulSoup(res.text, "html.parser")
links = soup.select(".titleline > a")

def create_custom_hn(links):
    hn = []
    for link in links:
        title = link.getText()
        href = link.get("href", None)
        votes = link.find_next(class_="score")
        points = int(votes.getText().replace(" points", ""))
        hn.append({"title": title, "href": href, "points": points})
    return hn

print(create_custom_hn(links))
Prints:
[
  {
    "title": "Urllib3 in 2022",
    "href": "https://sethmlarson.dev/urllib3-in-2022",
    "points": 97,
  },
  {
    "title": "First public release of Pushup: a new compiler for making web apps in Go",
    "href": "https://github.com/adhocteam/pushup",
    "points": 18,
  },
  {
    "title": "Intelligence – A good collection of great OSINT Resources",
    "href": "https://github.com/ARPSyndicate/awesome-intelligence",
    "points": 113,
  },
  {
    "title": "Microsoft is preparing to add ChatGPT to Bing",
    "href": "https://www.bloomberg.com/news/articles/2023-01-04/microsoft-hopes-openai-s-chatbot-will-make-bing-smarter",
    "points": 760,
  },
...and so on.
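The key change is votes = link.find_next(class_="score"), which searches forward in the document from each title link, so every title is paired with its own score element instead of relying on two parallel lists being the same length.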
Try to select your elements more specifically; your selection soup.select('.titleline a') includes more elements (60) than you want to select (30):
[<a href="https://sethmlarson.dev/urllib3-in-2022">Urllib3 in 2022</a>,
<span class="sitestr">sethmlarson.dev</span>, ...]
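A quick check makes the difference visible, using the soup from the question (the counts of 60 vs. 30 assume the default 30 stories per page):
print(len(soup.select('.titleline a')))    # 60 - every <a> under .titleline, including the site links
print(len(soup.select('.titleline > a')))  # 30 - only direct children, i.e. the story titles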
I would also recommend iterating the elements in another way, so that you can handle missing values.
Example
import requests
from bs4 import BeautifulSoup

res = requests.get('https://news.ycombinator.com/news')
soup = BeautifulSoup(res.text)

data = []
for e in soup.select('tr.athing'):
    data.append({
        'title': e.select_one('.titleline a').get_text(),
        'url': e.select_one('.titleline a').get('href'),
        'votes': e.find_next(class_='subtext').text.split()[0]
    })

print(data)
Output
[{'title': 'Urllib3 in 2022', 'url': 'https://sethmlarson.dev/urllib3-in-2022', 'votes': '93'}, {'title': 'First public release of Pushup: a new compiler for making web apps in Go', 'url': 'https://github.com/adhocteam/pushup', 'votes': '16'}, {'title': 'Intelligence – A good collection of great OSINT Resources', 'url': 'https://github.com/ARPSyndicate/awesome-intelligence', 'votes': '109'}, {'title': 'Microsoft is preparing to add ChatGPT to Bing', 'url': 'https://www.bloomberg.com/news/articles/2023-01-04/microsoft-hopes-openai-s-chatbot-will-make-bing-smarter', 'votes': '755'}, {'title': 'Juan Tamariz, the godfather of close-up card magic', 'url': 'https://www.nytimes.com/2023/01/02/magazine/juan-tamariz-magic.html', 'votes': '31'}, {'title': 'The Expanding Dark Forest and Generative AI', 'url': 'https://maggieappleton.com/ai-dark-forest', 'votes': '223'}, {'title': 'Irreconcilable differences between local and distributed computing (1994)', 'url': 'https://scholar.harvard.edu/waldo/publications/note-distributed-computing', 'votes': '29'},...]

Cannot get the "href" attributes via BeautifulSoup

In short, I can't get the links from the "href" attribute on this site (a Turkish online seller of books and related items).
Here's my code (I know it's not the best; I've been learning Python online for a few months, so any heads-up on best practices is also welcome).
I tried to get the book names, writers, prices, publishers, and links for each book; without the links it works as I expected.
import requests
import pandas as pd
from bs4 import BeautifulSoup
from time import sleep
from random import randint

yazar = []
fiyat = []
yayın = []
isim = []

for i in range(1, 10):
    url = "https://www.dr.com.tr/CokSatanlar/Kitap#/page=" + str(i)
    page = requests.get(url)
    soup = BeautifulSoup(page.text, "lxml")

    # book names
    k = soup.find_all("a", {"class": "prd-name"})
    for i in k:
        isim.append(i.text)

    # writer names
    y = soup.find_all("a", {"class": "who text-overflow"})
    for i in y:
        yazar.append(i.text)

    # prices
    f = soup.find_all("div", {"class": "prd-price"})
    for i in f:
        fiyat.append(i.text.split()[0])

    # publishers
    ye = soup.find_all("a", {"class": "prd-publisher"})
    for i in ye:
        yayın.append(i.get("title"))

    sleep(randint(2, 4))
However, when I try to get the links with
soup.find_all("a", {"class": "prd-name"}).get("href")
it returns None, and I couldn't manage to make this work whatever I tried.
Thank you all in advance, and sorry for a post that's a little longer than usual.
The data you see on the page is loaded from an external location, so you need a different URL to get the correct data:
import requests
import pandas as pd
from bs4 import BeautifulSoup

url = "https://www.dr.com.tr/Catalog/CatalogProducts"

data = {
    "catalogId": "4020",
    "page": "1",
    "sortfield": "soldcount",
    "sortorder": "desc",
    "size": "60",
    "categoryid": "0",
    "parentId": "0",
    "mediatypes": "",
    "HideNotForSale": "true",
    "minPrice": "-1",
    "maxPrice": "-1",
    "writer": "",
    "minDiscount": "-1",
    "maxdiscount": "-1",
    "language": "",
}

all_data = []
for page in range(1, 3):  # <-- increase number of pages here
    print(f"Getting page {page}")
    data["page"] = page
    soup = BeautifulSoup(requests.post(url, data=data).content, "html.parser")

    for p in soup.select(".prd-content"):
        all_data.append(p.get_text(strip=True, separator="|").split("|")[:5])

df = pd.DataFrame(
    all_data, columns=["name", "autor", "price", "type", "publisher"]
)
print(df)
df.to_csv("data.csv", index=False)
Prints:
name autor price type publisher
0 Esra Ezmeci Seti 5 Kitap Takım - Defter Hediyeli Esra Ezmeci 155,45 TL İnce Kapak Destek Yayınları
1 Şimdi Onlar Düşünsün Bircan Yıldırım 36,20 TL İnce Kapak Destek Yayınları
2 İz Bıraktığın Kadar Varsın Esra Ezmeci 36,20 TL İnce Kapak Destek Yayınları
...
and saves data.csv.
Actually, I don't think you'll get None; you will get:
AttributeError: ResultSet object has no attribute 'get'. You're probably treating a list of elements like a single element. Did you call find_all() when you meant to call find()?
find_all() produces a ResultSet, so you have to iterate over it to get all the hrefs:
for a in soup.find_all("a", {"class": "prd-name"}):
    print('https://www.dr.com.tr' + a.get("href"))
Output
https://www.dr.com.tr/kitap/daha-adil-bir-dunya-mumkun/arastirma-tarih/politika-arastirma/turkiye-politika-/urunno=0001934858001
https://www.dr.com.tr/kitap/burasi-cok-onemli-enerjiden-ekonomiye-tam-bagimsiz-turkiye/arastirma-tarih/politika-arastirma/turkiye-politika-/urunno=0001966362001
https://www.dr.com.tr/kitap/iz-biraktigin-kadar-varsin/egitim-basvuru/psikoloji-bilimi/urunno=0001947472001
https://www.dr.com.tr/kitap/simdi-onlar-dusunsun/bircan-yildirim/egitim-basvuru/kisisel-gelisim/urunno=0001964436001
https://www.dr.com.tr/kitap/kadinlar-sicak-erkekler-soguk-sever/esra-ezmeci/egitim-basvuru/psikoloji-bilimi/urunno=0001904239001
https://www.dr.com.tr/kitap/dustugunde-kalkarsan-hayat-guzeldir/egitim-basvuru/psikoloji-bilimi/urunno=0001816754001
...
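As a small aside (my suggestion, not part of the original answer): urllib.parse.urljoin is a tidier way to build the absolute URLs than string concatenation, since it also copes with hrefs that are already absolute:
from urllib.parse import urljoin

for a in soup.find_all("a", {"class": "prd-name"}):
    # urljoin keeps the base only when the href is relative
    print(urljoin("https://www.dr.com.tr", a.get("href")))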

How can I only parse the first HTML block from multiple blocks, if they all contain the same class-name?

I need to parse info from a site. On this site there are two blocks, "Today" and "Yesterday", and they have the same class name, standard-box standard-list.
How can I parse only the first block (under "Today") without extracting the info from "Yesterday", if they both have the same class name?
Here is my code:
import requests
from bs4 import BeautifulSoup  # import added; the original snippet omitted it

url_news = "https://www.123.org/"
response = requests.get(url_news)
soup = BeautifulSoup(response.content, "html.parser")

items = soup.findAll("div", class_="standard-box standard-list")
news_info = []
for item in items:
    news_info.append({
        "title": item.find("div", class_="newstext").text,
        "link": item.find("a", class_="newsline article").get("href")
    })
When running your provided code, I don't get an output for items. However, you said that you do, so:
If you only want to get the data under "Today", you can use .find() instead of .find_all(), since .find() will only return the first found tag -- which is "Today" and not the other tags.
So, instead of:
items = soup.findAll("div", class_="standard-box standard-list")
Use:
items = soup.find("div", class_="standard-box standard-list")
Additionally, to find the link, I needed to access the attribute using tag-name[attribute]. Here is working code:
news_info = []
items = soup.find("div", class_="standard-box standard-list")
for item in items:
    news_info.append(
        {"title": item.find("div", class_="newstext").text, "link": item["href"]}
    )
print(news_info)
Output:
[{'title': 'NIP crack top 3 ranking for the first time in 5 years', 'link': 'https://www.hltv.org/news/32545/nip-crack-top-3-ranking-for-the-first-time-in-5-years'}, {'title': 'Fessor joins Astralis Talent', 'link': 'https://www.hltv.org/news/32544/fessor-joins-astralis-talent'}, {'title': 'Grashog joins AGO', 'link': 'https://www.hltv.org/news/32542/grashog-joins-ago'}, {'title': 'ISSAA parts ways with Eternal Fire', 'link': 'https://www.hltv.org/news/32543/issaa-parts-ways-with-eternal-fire'}, {'title': 'BLAST Premier Fall Showdown Fantasy live', 'link': 'https://www.hltv.org/news/32541/blast-premier-fall-showdown-fantasy-live'}, {'title': 'FURIA win IEM Fall NA, EG claim final Major Legends spot', 'link': 'https://www.hltv.org/news/32540/furia-win-iem-fall-na-eg-claim-final-major-legends-spot'}]
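Note that iterating a single Tag like this walks its direct children, which only works when those children are exactly the news anchors. Here is an alternative sketch that scopes the search explicitly (it assumes, as the question's class names suggest, that each newsline anchor contains its newstext div):
first_block = soup.find("div", class_="standard-box standard-list")
news_info = []
for link in first_block.find_all("a", class_="newsline article"):
    headline = link.find("div", class_="newstext")
    if headline:  # skip anchors without a headline
        news_info.append({"title": headline.text, "link": link.get("href")})
print(news_info)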

Scrape eBay Sold Items Using Selenium Returns []

I have almost no web scraping experience and wasn't able to solve this using BeautifulSoup, so I'm trying Selenium (installed it today). I'm trying to scrape sold items on eBay, from this URL:
https://www.ebay.com/sch/i.html?_from=R40&_nkw=oakley+sunglasses&_sacat=0&Brand=Oakley&rt=nc&LH_Sold=1&LH_Complete=1&_ipg=200&_oaa=1&_fsrp=1&_dcat=79720
Here is my code, where I load the page with requests and then open it in Selenium:
import requests
from selenium import webdriver as wd  # imports added; the original snippet omitted them

ebay_url = 'https://www.ebay.com/sch/i.html?_from=R40&_nkw=oakley+sunglasses&_sacat=0&Brand=Oakley&rt=nc&LH_Sold=1&LH_Complete=1&_ipg=200&_oaa=1&_fsrp=1&_dcat=79720'
html = requests.get(ebay_url)
# print(html.text)

driver = wd.Chrome(executable_path=r'/Users/mburley/Downloads/chromedriver')
driver.get(ebay_url)
This correctly opens a new Chrome session at the correct URL. I'm working on getting the titles, prices, and dates sold, and then loading them into a CSV file. Here is the code I have for those:
# Find all div tags and set equal to all_items
all_items = driver.find_elements_by_class_name("s-item__info clearfix")[1:]
# print(all_items)

data = []  # list to collect results; not shown in the original snippet

# Loop over all_items to extract div classes for title, price, and date
for item in all_items:
    date = item.find_element_by_xpath("//span[contains(@class, 'POSITIVE')]").text.strip()
    title = item.find_element_by_xpath("//h3[contains(@class, 's-item__title s-item__title--has-tags')]").text.strip()
    price = item.find_element_by_xpath("//span[contains(@class, 's-item__price')]").text.strip()
    print('title:', title)
    print('price:', price)
    print('date:', date)
    print('---')
    data.append([title, price, date])
This just returns []. I think eBay may be blocking my IP, but the HTML code loads and looks correct. Hopefully someone can help! Thanks!
It is not necessary to use Selenium for eBay scraping, as the data is not rendered by JavaScript and can therefore be extracted from plain HTML. It is enough to use the BeautifulSoup web scraping library.
Keep in mind that problems with site parsing may arise when you request a site multiple times: eBay may decide that the requests come from a bot rather than a real user.
To avoid this, one way is to send headers containing a user-agent with the request; the site will then assume you're a real user and display the information.
An additional step is to rotate those user-agents. The ideal scenario is to use proxies in combination with rotated user-agents (besides a CAPTCHA solver).
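A minimal sketch of the rotation idea (the agent strings here are illustrative placeholders, not from the original answer):
import random
import requests

# an illustrative pool; in practice keep a larger, up-to-date list
user_agents = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36",
]

headers = {"User-Agent": random.choice(user_agents)}  # pick a different agent per request
page = requests.get("https://www.ebay.com/sch/i.html", headers=headers, timeout=30)
The full example below sticks to a single user-agent: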
from bs4 import BeautifulSoup
import requests, json, lxml

# https://requests.readthedocs.io/en/latest/user/quickstart/#custom-headers
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36"
}

params = {
    '_nkw': 'oakley+sunglasses',  # search query
    'LH_Sold': '1',               # shows sold items
    '_pgn': 1                     # page number
}

data = []

while True:
    page = requests.get('https://www.ebay.com/sch/i.html', params=params, headers=headers, timeout=30)
    soup = BeautifulSoup(page.text, 'lxml')

    print(f"Extracting page: {params['_pgn']}")
    print("-" * 10)

    for products in soup.select(".s-item__info"):
        title = products.select_one(".s-item__title span").text
        price = products.select_one(".s-item__price").text
        link = products.select_one(".s-item__link")["href"]

        data.append({
            "title": title,
            "price": price,
            "link": link
        })

    if soup.select_one(".pagination__next"):
        params['_pgn'] += 1
    else:
        break

print(json.dumps(data, indent=2, ensure_ascii=False))
Example output
Extracting page: 1
----------
[
  {
    "title": "Shop on eBay",
    "price": "$20.00",
    "link": "https://ebay.com/itm/123456?hash=item28caef0a3a:g:E3kAAOSwlGJiMikD&amdata=enc%3AAQAHAAAAsJoWXGf0hxNZspTmhb8%2FTJCCurAWCHuXJ2Xi3S9cwXL6BX04zSEiVaDMCvsUbApftgXEAHGJU1ZGugZO%2FnW1U7Gb6vgoL%2BmXlqCbLkwoZfF3AUAK8YvJ5B4%2BnhFA7ID4dxpYs4jjExEnN5SR2g1mQe7QtLkmGt%2FZ%2FbH2W62cXPuKbf550ExbnBPO2QJyZTXYCuw5KVkMdFMDuoB4p3FwJKcSPzez5kyQyVjyiIq6PB2q%7Ctkp%3ABlBMULq7kqyXYA"
  },
  {
    "title": "Oakley X-metal Juliet Men's Sunglasses",
    "price": "$280.00",
    "link": "https://www.ebay.com/itm/265930582326?hash=item3deab2a936:g:t8gAAOSwMNhjRUuB&amdata=enc%3AAQAHAAAAoH76tlPncyxembf4SBvTKma1pJ4vg6QbKr21OxkL7NXZ5kAr7UvYLl2VoCPRA8KTqOumC%2Bl5RsaIpJgN2o2OlI7vfEclGr5Jc2zyO0JkAZ2Gftd7a4s11rVSnktOieITkfiM3JLXJM6QNTvokLclO6jnS%2FectMhVc91CSgZQ7rc%2BFGDjXhGyqq8A%2FoEyw4x1Bwl2sP0viGyBAL81D2LfE8E%3D%7Ctkp%3ABk9SR8yw1LH9YA"
  },
  {
    "title": "Used Oakley PROBATION Sunglasses Polished Gold/Dark Grey (OO4041-03)",
    "price": "$120.00",
    "link": "https://www.ebay.com/itm/334596701765?hash=item4de7847e45:g:d5UAAOSw4YtjTfEE&amdata=enc%3AAQAHAAAAoItMbbzfQ74gNUiinmOVnzKlPWE%2Fc54B%2BS1%2BrZpy6vm5lB%2Bhvm5H43UFR0zeCU0Up6sPU2Wl6O6WR0x9FPv5Y1wYKTeUbpct5vFKu8OKFBLRT7Umt0yxmtLLMWaVlgKf7StwtK6lQ961Y33rf3YuQyp7MG7H%2Fa9fwSflpbJnE4A9rLqvf3hccR9tlWzKLMj9ZKbGxWT17%2BjyUp19XIvX2ZI%3D%7Ctkp%3ABk9SR8yw1LH9YA"
  },
  ...
As an alternative, you can use the Ebay Organic Results API from SerpApi. It's a paid API with a free plan that handles blocks and parsing on its backend.
Example code that paginates through all pages:
from serpapi import EbaySearch
import os, json

params = {
    "api_key": os.getenv("API_KEY"),  # serpapi api key
    "engine": "ebay",                 # search engine
    "ebay_domain": "ebay.com",        # ebay domain
    "_nkw": "oakley+sunglasses",      # search query
    "_pgn": 1,                        # page number
    "LH_Sold": "1"                    # shows sold items
}

search = EbaySearch(params)  # where data extraction happens

page_num = 0
data = []

while True:
    results = search.get_dict()  # JSON -> Python dict

    if "error" in results:
        print(results["error"])
        break

    for organic_result in results.get("organic_results", []):
        link = organic_result.get("link")
        price = organic_result.get("price")

        data.append({
            "price": price,
            "link": link
        })

    page_num += 1
    print(page_num)

    if "next" in results.get("pagination", {}):
        params['_pgn'] += 1
    else:
        break

print(json.dumps(data, indent=2))
Output:
[
  {
    "price": {
      "raw": "$68.96",
      "extracted": 68.96
    },
    "link": "https://www.ebay.com/itm/125360598217?epid=20030526224&hash=item1d3012ecc9:g:478AAOSwCt5iqgG5&amdata=enc%3AAQAHAAAA4Ls3N%2FEH5OR6w3uoTlsxUlEsl0J%2B1aYmOoV6qsUxRO1d1w3twg6LrBbUl%2FCrSTxNOjnDgIh8DSI67n%2BJe%2F8c3GMUrIFpJ5lofIRdEmchFDmsd2I3tnbJEqZjIkWX6wXMnNbPiBEM8%2FML4ljppkSl4yfUZSV%2BYXTffSlCItT%2B7ZhM1fDttRxq5MffSRBAhuaG0tA7Dh69ZPxV8%2Bu1HuM0jDQjjC4g17I3Bjg6J3daC4ZuK%2FNNFlCLHv97w2fW8tMaPl8vANMw8OUJa5z2Eclh99WUBvAyAuy10uEtB3NDwiMV%7Ctkp%3ABk9SR5DKgLD9YA"
  },
  {
    "price": {
      "raw": "$62.95",
      "extracted": 62.95
    },
    "link": "https://www.ebay.com/itm/125368283608?epid=1567457519&hash=item1d308831d8:g:rnsAAOSw7PJiqMQz&amdata=enc%3AAQAHAAAA4AwZhKJZfTqrG8VskZL8rtfsuNtZrMdWYpndpFs%2FhfrIOV%2FAjLuzNzaMNIvTa%2B6QUTdkOwTLRun8n43cZizqtOulsoBLQIwy3wf19N0sHxGF5HaIDOBeW%2B2sobRnzGdX%2Fsmgz1PRiKFZi%2BUxaLQpWCoGBf9n8mjcsFXi3esxbmAZ8kenO%2BARbRBzA2Honzaleb2tyH5Tf8%2Bs%2Fm5goqbon%2FcEsR0URO7BROkBUUjDCdDH6fFi99m6anNMMC3yTBpzypaFWio0u2qu5TgjABUfO1wzxb4ofA56BNKjoxttb7E%2F%7Ctkp%3ABk9SR5DKgLD9YA"
  },
  # ...
]
Disclaimer, I work for SerpApi.
You can use the code below to scrape the details. You can also use pandas to store the data in a CSV file.
Code:
ebay_url = 'https://www.ebay.com/sch/i.html?_from=R40&_nkw=oakley+sunglasses&_sacat=0&Brand=Oakley&rt=nc&LH_Sold=1&LH_Complete=1&_ipg=200&_oaa=1&_fsrp=1&_dcat=79720'
html = requests.get(ebay_url)
# print(html.text)

driver = wd.Chrome(executable_path=r'/Users/mburley/Downloads/chromedriver')
driver.maximize_window()
driver.implicitly_wait(30)
driver.get(ebay_url)
wait = WebDriverWait(driver, 20)

sold_date = []
title = []
price = []
i = 1
for item in driver.find_elements(By.XPATH, "//div[contains(@class,'title--tagblock')]/span[@class='POSITIVE']"):
    sold_date.append(item.text)
    title.append(driver.find_element_by_xpath(f"(//div[contains(@class,'title--tagblock')]/span[@class='POSITIVE']/ancestor::div[contains(@class,'tag')]/following-sibling::a/h3)[{i}]").text)
    price.append(item.find_element_by_xpath(f"(//div[contains(@class,'title--tagblock')]/span[@class='POSITIVE']/ancestor::div[contains(@class,'tag')]/following-sibling::div[contains(@class,'details')]/descendant::span[@class='POSITIVE'])[{i}]").text)
    i = i + 1

print(sold_date)
print(title)
print(price)

data = {
    'Sold_date': sold_date,
    'title': title,
    'price': price
}
df = pd.DataFrame.from_dict(data)
df.to_csv('out.csv', index=0)
Imports:
import pandas as pd
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By

Web scrape Google search results using BeautifulSoup

My goal is to web scrape Google search results using BeautifulSoup. I am using Anaconda Python and IPython as the IDE console. Why don't I get any output when I run the following code?
def google_scrape(query):
    address = "http://www.google.com/search?q=%s&num=100&hl=en&start=0" % (urllib.quote_plus(query))
    request = urllib2.Request(address, None, {'User-Agent': 'Mosilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11'})
    urlfile = urllib2.urlopen(request)
    page = urlfile.read()
    soup = BeautifulSoup(page)
    linkdictionary = {}
    for li in soup.findAll('li', attrs={'class': 'g'}):
        sLink = li.find('a')
        print sLink['href']
        sSpan = li.find('span', attrs={'class': 'st'})
        print sSpan
    return linkdictionary

if __name__ == '__main__':
    links = google_scrape('english')
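(Note that this snippet is Python 2: it uses urllib2 and print statements. The requests-based example in the second answer below is the Python 3 equivalent.)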
You are never adding anything to linkdictionary.
def google_scrape(query):
    address = "http://www.google.com/search?q=%s&num=100&hl=en&start=0" % (urllib.quote_plus(query))
    request = urllib2.Request(address, None, {'User-Agent': 'Mosilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11'})
    urlfile = urllib2.urlopen(request)
    page = urlfile.read()
    soup = BeautifulSoup(page)
    linkdictionary = {}
    for li in soup.findAll('li', attrs={'class': 'g'}):
        sLink = li.find('a')
        sSpan = li.find('span', attrs={'class': 'st'})
        linkdictionary['href'] = sLink['href']
        linkdictionary['sSpan'] = sSpan
    return linkdictionary

if __name__ == '__main__':
    links = google_scrape('english')
The problem, as Cody Bouche mentioned, is that nothing is ever added to the dict().
In my opinion, you'll have a hard time updating your dict unless you change {} (dict) to [] (list).
Appending to a list is much simpler (note: I could be wrong here; it's just a personal opinion from previous experience).
To make it work in a simple manner, you need to change the dict to a list ({} --> []) and then use .append({}) to append each result to the list.
Code and example in the online IDE:
import requests, json
from bs4 import BeautifulSoup

# a user-agent header is assumed here; the original answer did not show its
# definition, but Google serves the full HTML only to browser-like requests
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36"
}

def google_scrape(query):
    html = requests.get(f'https://www.google.com/search?q={query}', headers=headers).text
    soup = BeautifulSoup(html, 'lxml')
    data = []
    for container in soup.findAll('div', class_='tF2Cxc'):
        title = container.select_one('.DKV0Md').text
        link = container.find('a')['href']
        data.append({
            'title': title,
            'link': link,
        })
        print(f'{title}\n{link}')
    print(json.dumps(data, indent=2))

google_scrape('english')
# part of the output:
'''
English language - Wikipedia
https://en.wikipedia.org/wiki/English_language
[
  {
    "title": "English language - Wikipedia",
    "link": "https://en.wikipedia.org/wiki/English_language"
  },
]
'''
If you still want to append to a dict(), this is one way of approaching it (only part of the for loop is shown):
for container in soup.findAll('div', class_='tF2Cxc'):
    data_dict = {}
    title = container.select_one('.DKV0Md').text
    link = container.find('a')['href']

    # creates title key and assigns title value
    data_dict['title'] = title
    # creates link key and assigns link value
    data_dict['link'] = link

    print(json.dumps(data_dict, indent=2))
# part of the output:
'''
{
  "title": "Minecraft Official Site | Minecraft",
  "link": "https://www.minecraft.net/en-us/"
}
'''
Alternatively, you can do the same thing using the Google Search Engine Results API from SerpApi. It's a paid API with a free plan.
Essentially, it does the same thing as the code above, but you don't have to figure out how to scrape certain elements; that part is already done for the end user, with JSON output, so the only thing left to do is iterate over the JSON and get the desired output.
Code to integrate:
from serpapi import GoogleSearch
import json

params = {
    "api_key": "YOUR_API_KEY",
    "engine": "google",
    "q": "minecraft",
}

search = GoogleSearch(params)
results = search.get_dict()

for result in results['organic_results']:
    print(json.dumps(result, indent=2, ensure_ascii=False))
# part of the json output:
'''
{
  "position": 1,
  "title": "Minecraft - Aplikasi di Google Play",
  "link": "https://play.google.com/store/apps/details?id=com.mojang.minecraftpe&hl=in&gl=US",
  "displayed_link": "https://play.google.com › store › apps › details › id=co...",
  "rich_snippet": {
    "top": {
      "detected_extensions": {
        "skor": 46,
        "suara": 4144655,
        "us": 749
      },
      "extensions": [
        "Skor: 4,6",
        "‎4.144.655 suara",
        "‎US$7,49",
        "‎Android",
        "‎Game"
      ]
    }
  }
}
'''
Disclaimer, I work for SerpApi.
