Grabbing some data from a card-body div class - python

Good day.
My script is in progress and I need help or ideas to make it work properly. I am able to grab some data, but it isn't really readable or useful, so your help and ideas are needed.
from bs4 import BeautifulSoup
import requests

headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0"}
url = "https://bscscan.com/tx/0xb07b68f72f0b58e8cfb8c8e896736f49b13775ebda25301475d24554a601ff97#eventlog"
urlpage = requests.get(url, headers=headers, timeout=10, allow_redirects=False)
soup = BeautifulSoup(urlpage.content, 'html.parser')

price = soup.find('div', class_='d-none d-md-inline-block u-label u-label--price rounded mt-1 ml-n1 text-nowrap').get_text()
print("Price: ", price)
data1 = soup.find('div', class_='card-body').get_text()
print(data1)
data2 = soup.find('span', class_='btn btn-icon btn-soft-success rounded-circle').get_text()
print(data2)
Current Output:
Price:
BNB: $422.35 (-3.05%) | 5 Gwei
Transaction Hash:
0xb07b68f72f0b58e8cfb8c8e896736f49b13775ebda25301475d24554a601ff97
Status:Success
Squeezed text (173 lines).
206
Wanted Output:
Price:
BNB: $422.35 (-3.05%) | 5 Gwei
207 #-- latest data
Address: 0x81e0ef68e103ee65002d3cf766240ed1c070334d
Topics: 0 0x598cd56214a374d15f638dd04913e0288cd76c7833ee66b15cf55845d875a187
Data
0000000000000000000000000000000000000000000000000000000061b23bae
00000000000000000000000000000000000000000000000000000000979144b0

An alternative which caters for always picking up the latest transaction (if more transactions are added). Because JavaScript doesn't run with requests, the content isn't as it appears on the webpage; you need to target the element with id myTabContent.
I've attempted to go with hopefully more stable selectors and to avoid some of the potentially less robust classes.
import requests
from bs4 import BeautifulSoup as bs

r = requests.get('https://bscscan.com/tx/0xb07b68f72f0b58e8cfb8c8e896736f49b13775ebda25301475d24554a601ff97#eventlog',
                 headers={'User-Agent': 'Mozilla/5.0'})
soup = bs(r.content, 'lxml')

# select price info
price = soup.select_one('#ethPrice').get_text(' ', strip=True)

# select latest event
last_transaction = soup.select_one('#myTabContent div.media:nth-last-child(2)')
latest_number = int(last_transaction.select_one('.btn-icon__inner').text)
address = last_transaction.select_one('a.text-break').text
topic = last_transaction.select_one('li > .text-break').text

print('Price:', price)
print('Latest number:', latest_number)
print('Address:', address)
print('Topics:', topic)
print('Data')
for data in last_transaction.select('[id^=chunk].text-break'):
    print(data.text)
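The nth-last-child selector counts from the end of the container, so it keeps pointing at the newest event as more are added; the offset of 2 suggests the container's last child is not an event row. Note that select_one returns None when nothing matches, so the .text accesses above would then raise AttributeError. A minimal defensive sketch, continuing from the soup object above (the helper name safe_text is my own, not from the original answer):

def safe_text(node, default=''):
    # Return the stripped text of a matched node, or a default when the
    # selector matched nothing (select_one returned None).
    return node.get_text(strip=True) if node is not None else default

last_transaction = soup.select_one('#myTabContent div.media:nth-last-child(2)')
if last_transaction is not None:
    print('Address:', safe_text(last_transaction.select_one('a.text-break')))
    print('Topics:', safe_text(last_transaction.select_one('li > .text-break')))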

Actually, selecting all the data according to the requirement is a little bit complex. I apply CSS selectors here; however, you can also apply the find_all/find methods.
from bs4 import BeautifulSoup
import requests

headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0"}
url = "https://bscscan.com/tx/0xb07b68f72f0b58e8cfb8c8e896736f49b13775ebda25301475d24554a601ff97#eventlog"
urlpage = requests.get(url, headers=headers, timeout=10, allow_redirects=False)
soup = BeautifulSoup(urlpage.content, 'html.parser')

price = soup.find('div', class_='d-none d-md-inline-block u-label u-label--price rounded mt-1 ml-n1 text-nowrap').get_text()
print("Price: ", price)

for card in soup.select('div.media')[1:2]:
    num = card.select_one('.mt-1.mr-3').text
    print(num)
    address = card.select_one('.col-md-10.mb-0 a').text
    print(address)
    topic = card.select_one('.text-monospace.text-break').text
    print(topic)
    data1 = card.select_one('#chunk_2_4').text
    print(data1)
    data2 = card.select_one('#chunk_2_5').text
    print(data2)
Output:
Price:
BNB: $422.41 (-3.65%) | 5 Gwei
207
0x81e0ef68e103ee65002d3cf766240ed1c070334d
0x598cd56214a374d15f638dd04913e0288cd76c7833ee66b15cf55845d875a187
0000000000000000000000000000000000000000000000000000000061b23bae
00000000000000000000000000000000000000000000000000000000979144b0
It's working. The problem was that data2 = card.select_one('#chunk_2_5') doesn't exist on this transaction's page, so you were getting a NoneType error, but everything else is okay:
from bs4 import BeautifulSoup
import requests

headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0"}
url = "https://bscscan.com/tx/0x173c462e910c95a67c119c61566330a835e4785221e247fada6d2279052519f1#eventlog"
urlpage = requests.get(url, headers=headers, timeout=10, allow_redirects=False)
soup = BeautifulSoup(urlpage.content, 'html.parser')

price = soup.find('div', class_='d-none d-md-inline-block u-label u-label--price rounded mt-1 ml-n1 text-nowrap').get_text()
print("Price: ", price)

for card in soup.select('div.media')[1:2]:
    num = card.select_one('.mt-1.mr-3').text
    print(num)
    address = card.select_one('.col-md-10.mb-0 a').text
    print(address)
    topic = card.select_one('.text-monospace.text-break').text
    print(topic)
    data1 = card.select_one('#chunk_2_4').text
    print(data1)
    # data2 = card.select_one('#chunk_2_5').text
    # print(data2)

    # If you need all updated data:
    # for all_data in card.select('[id^=chunk]'):
    #     print(all_data.text)
Output:
Price:
BNB: $422.25 (-3.15%) | 5 Gwei
315
0x7ee058420e5937496f5a2096f04caa7721cf70cc
0x694af1cc8727cdd0afbdd53d9b87b69248bd490224e9dd090e788546506e076f
0000000000000000000000000000000000000000000000000000000062e6b858
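Since the chunk ids vary between transactions, hard-coding #chunk_2_4 or #chunk_2_5 breaks whenever a page has a different number of data words. A minimal sketch of the commented-out idea above, made runnable, iterating over whatever chunks actually exist (continuing from the same soup object):

for card in soup.select('div.media')[1:2]:
    # Iterate over every data chunk this card contains, instead of
    # guessing fixed ids like #chunk_2_4 or #chunk_2_5.
    for chunk in card.select('[id^=chunk]'):
        print(chunk.text)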

Related

How to grab some part of the link inside the td tag in python

I'm trying to grab the link inside a td. My code does not display the link or produce the desired output. What do I need to change?
from bs4 import BeautifulSoup
from time import sleep
import requests

headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0"}
urllink = "https://bscscan.com/txs?block=11711353&ps=100&p=1"
reqblockdetails = requests.get(urllink, headers=headers, timeout=5)
soupblockdetails = BeautifulSoup(reqblockdetails.content, 'html.parser')
rowsblockdetails = soupblockdetails.findAll('table')[0].findAll('tr')
sleep(1)
for row in rowsblockdetails[1:]:
    txnhash = row.find_all('td')[1].text
    txnhashdetails = txnhash.strip()
    destination = row.find_all('td')[8].text
    destination = destination.strip()
    if str(destination) == "CoinOne: CONE Token":
        urldest = soupblockdetails.find('a', attrs={'class': 'hash-tag text-truncate'}).text
        print(" {:>1} {:<5}".format(txnhashdetails, destination))
        print(urldest)
    else:
        pass
Current Output:
0x8265a6ba5ce531df645b883e8735af57241f43e92eb3c9a88f43b89310f964bc CoinOne: CONE Token Validator: Stake2me
Needed Output:
0x8265a6ba5ce531df645b883e8735af57241f43e92eb3c9a88f43b89310f964bc CoinOne: CONE Token 0x9628735017f1a985ebaac0b203efb9e8d3ed0fef
It would be better to search for the <a> element in the currently selected <td>, not in the whole document, so I changed the code to td = row.find_all('td')[8] and later to td.find('a', ...).
Here is the working code:
from bs4 import BeautifulSoup
from time import sleep
import requests

headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0"}
urllink = "https://bscscan.com/txs?block=11711353&ps=100&p=1"
reqblockdetails = requests.get(urllink, headers=headers, timeout=5)
soupblockdetails = BeautifulSoup(reqblockdetails.content, 'html.parser')
rowsblockdetails = soupblockdetails.findAll('table')[0].findAll('tr')
sleep(1)
for row in rowsblockdetails[1:]:
    txnhash = row.find_all('td')[1].text
    txnhashdetails = txnhash.strip()
    td = row.find_all('td')[8]
    destination = td.text.strip()
    if destination == "CoinOne: CONE Token":
        # Take the href and drop the leading "/address/" path segment.
        # removeprefix (Python 3.9+) removes the exact prefix; lstrip would
        # strip a character set and is only accidentally correct here.
        urldest = td.find('a', attrs={'class': 'hash-tag text-truncate'})["href"].removeprefix("/address/")
        print(" {:>1} {:<5}".format(txnhashdetails, destination))
        print(urldest)
Hope it will work. Alternatively, try this:
t_link = soupblockdetails.find('span', attrs={'class': 'hash-tag text-truncate'})
urldest = t_link.a['href']
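A quick aside on why lstrip("/address/") was swapped for removeprefix above: str.lstrip takes a set of characters to strip, not a literal prefix, so it can eat more than intended. A minimal illustration, with a hypothetical path chosen to expose the difference:

path = "/address/sadder-path"

# lstrip strips any leading characters found in the set {'/', 'a', 'd', 'r', 'e', 's'},
# so it also eats the 'sadder' at the start of the remaining segment.
print(path.lstrip("/address/"))        # -path

# removeprefix (Python 3.9+) removes only the exact leading string.
print(path.removeprefix("/address/"))  # sadder-path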

BS4: Google Next Page "Only the following pseudo-classes are implemented: nth-of-type"

While I am able to successfully scrape the first page, it does not allow me to do the second. Please note that I do not want to do this with Selenium.
import requests
from bs4 import BeautifulSoup

url = 'https://google.com/search?q=In+order+to&hl=en'
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0'}

page = 1
while True:
    print()
    print('Page {}...'.format(page))
    print('-' * 80)
    soup = BeautifulSoup(requests.get(url, headers=headers).content, 'html.parser')
    for h in soup.select('h3'):
        print(h.get_text(strip=True))
    next_link = soup.select_one('a:contains("Next")')
    if not next_link:
        break
    url = 'https://google.com' + next_link['href']
    page += 1
Result:
Page 1...
--------------------------------------------------------------------------------
In order to Synonyms, In order to Antonyms | Thesaurus.com
In order to - English Grammar Today - Cambridge Dictionary
in order to - Wiktionary
What is another word for "in order to"? - WordHippo
In Order For (someone or something) To | Definition of In ...
In Order For | Definition of In Order For by Merriam-Webster
In order to definition and meaning | Collins English Dictionary
Using "in order to" in English - English Study Page
IN ORDER (FOR SOMEONE / SOMETHING ) TO DO ...
262 In Order To synonyms - Other Words for In Order To
Searches related to In order to
Only the following pseudo-classes are implemented: nth-of-type.
The error lies here:
next_link = soup.select_one('a:contains("Next")')
You can use lxml as a parser instead of html.parser.
Install it with pip install lxml:
import requests
from bs4 import BeautifulSoup

url = 'https://google.com/search?q=In+order+to&hl=en'
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0'}

page = 1
while True:
    print()
    print('Page {}...'.format(page))
    print('-' * 80)
    soup = BeautifulSoup(requests.get(url, headers=headers).content, 'lxml')
    for h in soup.select('h3'):
        print(h.get_text(strip=True))
    next_link = soup.select_one('a:contains("Next")')
    if not next_link:
        break
    url = 'https://google.com' + next_link['href']
    page += 1
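If you'd rather not depend on the :contains pseudo-class at all (it is non-standard CSS, and newer soupsieve releases spell it :-soup-contains), a sketch of the same lookup using find with a callable filter:

# Find the pagination link by its visible text instead of a CSS pseudo-class.
# The callable works whether "Next" is a direct string child of the <a>
# or nested inside a <span>.
next_link = soup.find(lambda tag: tag.name == 'a' and 'Next' in tag.get_text())
if next_link:
    url = 'https://google.com' + next_link['href']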

Python BeautifulSoup4 not nesting/iterating

I'm trying to scrape product data off of Adidas and Nike, and while I'm successful in getting the first product's details, I can't get it to iterate through to any additional products. It looks like this is because bs4 is not nesting the items inside the container I'm pointing to into a list, and therefore not letting me iterate. I've tried searching around for solutions, but most things I read just point to changing the parser to lxml or html5lib. I don't think it's an issue with the sites' HTML, since I'm getting the same issue for both Nike and Adidas. Both chunks of code are:
Nike
from bs4 import BeautifulSoup
import requests

headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}
url = 'https://www.nike.com/w/mens-shoes-nik1zy7ok'
page = requests.get(url, headers=headers)
if page.status_code == 200:
    soup = BeautifulSoup(page.content, 'html.parser')
    product_grid = soup.findAll('div', attrs={'class': 'product-grid__items'})
    #print(product_grid)
    products = []
    for card in product_grid:
        name = card.find('a', attrs={'class': 'product-card__link-overlay'})
        products.append(name.text)
    print(products)
Returns
['Nike Air VaporMax Flyknit 3']
Adidas
from bs4 import BeautifulSoup
import requests

headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}
url = 'https://www.adidas.com/us/men-shoes'
page = requests.get(url, headers=headers)
if page.status_code == 200:
    soup = BeautifulSoup(page.content, 'html5lib')
    product_grid = soup.findAll('div', attrs={'class': 'product-container___3GvlZ'})
    #print(product_grid)
    products = []
    for card in product_grid:
        name = card.find('div', attrs={'class': 'gl-product-card__name'})
        products.append(name.text)
    print(products)
Returns
['NMD_R1 Shoes']
You can check the following code:
from bs4 import BeautifulSoup
import requests

headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}
url = 'https://www.nike.com/w/mens-shoes-nik1zy7ok'
page = requests.get(url, headers=headers)
if page.status_code == 200:
    soup = BeautifulSoup(page.content, 'html.parser')
    product_grid = soup.findAll('div', attrs={'class': 'product-grid__items'})
    products = []
    for card in product_grid:
        names = card.findAll('a', attrs={'class': 'product-card__link-overlay'})
        for element in names:
            products.append(element.text)
    print(products)
The issue was in: name = card.find('a', attrs={'class': 'product-card__link-overlay'}).
If you print it out, you get a single name because you are using .find, not .findAll.
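To make the distinction concrete, here is a minimal, self-contained illustration (the HTML string is made up for the example):

from bs4 import BeautifulSoup

html = '<div><a class="x">first</a><a class="x">second</a></div>'
soup = BeautifulSoup(html, 'html.parser')

# .find returns only the first match; .findAll returns every match.
print(soup.find('a', attrs={'class': 'x'}).text)                  # first
print([a.text for a in soup.findAll('a', attrs={'class': 'x'})])  # ['first', 'second']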

BeautifulSoup not returning full HTML of the page

I want to scrape a few pages from the Amazon website, like title, URL, and ASIN, and I ran into a problem: the script only parses 15 products while the page is showing 50. I decided to print all the HTML to the console, and I saw that the HTML ends at 15 products without any errors from the script.
Here is the part of my script
keyword = "men jeans".replace(' ', '+')
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1b3) Gecko/20090305 Firefox/3.1b3 GTB5'}
url = "https://www.amazon.com/s/field-keywords={}".format(keyword)
request = requests.session()
req = request.get(url, headers = headers)
sleep(3)
soup = BeautifulSoup(req.content, 'html.parser')
print(soup)
It's because a few of the items are generated dynamically. There might be a better solution than using Selenium; however, as a workaround you can try the below way instead.
from selenium import webdriver
from bs4 import BeautifulSoup

def fetch_item(driver, keyword):
    driver.get(url.format(keyword.replace(" ", "+")))
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    for items in soup.select("[id^='result_']"):
        try:
            name = items.select_one("h2").text
        except AttributeError:
            name = ""
        print(name)

if __name__ == '__main__':
    url = "https://www.amazon.com/s/field-keywords={}"
    driver = webdriver.Chrome()
    try:
        fetch_item(driver, "men jeans")
    finally:
        driver.quit()
Upon running the above script you should get around 56 names as a result.
import requests
from bs4 import BeautifulSoup

for page in range(1, 21):
    keyword = "red car".replace(' ', '+')
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1b3) Gecko/20090305 Firefox/3.1b3 GTB5'}
    url = "https://www.amazon.com/s/field-keywords=" + keyword + "?page=" + str(page)
    request = requests.session()
    req = request.get(url, headers=headers)
    soup = BeautifulSoup(req.content, 'html.parser')
    results = soup.findAll("li", {"class": "s-result-item"})
    for i in results:
        try:
            print(i.find("h2", {"class": "s-access-title"}).text.replace('[SPONSORED]', ''))
            print(i.find("span", {"class": "sx-price-large"}).text.replace("\n", ' '))
            print('*' * 20)
        except AttributeError:
            # Skip results that lack a title or price element.
            pass
Amazon's page range maxes out at 20, which is why the loop above crawls pages 1 through 20.

Can't find and process text taken out of an HTML

I'm trying to search a webpage for the "Spanish" content but can't get it at all.
This is the code I have so far:
from bs4 import BeautifulSoup
import requests
import re
url = 'http://www.autotaskstatus.net/'
r = requests.get(url)
estado = r.status_code
r = r.content
soup = BeautifulSoup(r, "html.parser")
data = soup.find_all('span', attrs={'class':'name'})[1]
pais = 'Spanish'
data.get_text()
print(data.text)
I have the pais var there so it can be replaced by an input, letting the user search for whatever country they want.
The only data I get with a 1 there is "Limited Release", but if I go with a 0 I can't filter the results at all.
I have been searching all over the Internet and couldn't find anyone with this same problem, so I can't find a solution.
I am using Python 3.6.
Edit: since people seemed to find this unclear, I'll explain it now.
What I have on the page is (just a part):
<div data-component-id="fp5s6cp13l47"
class="component-inner-container status-green "
data-component-status="operational"
data-js-hook="">
<span class="name">
Concord
</span>
<span class="tooltip-base tool" title="https://concord.centrastage.net">?</span>
<span class="component-status">
Operational
</span>
So "Spanish" sits in the same place as "Concord" here, and what I want to take out is the "Spanish" entry (and later on the "Operational" status), with the country in a var so it can later be changed to any country there.
You can get the Spanish server status using this approach:
from bs4 import BeautifulSoup
import requests

URL = 'http://www.autotaskstatus.net/'

with requests.session() as s:
    s.headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:50.0) Gecko/20100101 Firefox/50.0'}
    r = s.get(URL)
    soup = BeautifulSoup(r.content, "html.parser")
    data = soup.find_all('div', attrs={'class': 'component-inner-container'})
    pais = 'Spanish'
    print([d.find('span', {'class': 'name'}).text.strip() + ' - ' +
           d.find('span', {'class': 'component-status'}).text.strip()
           for d in data if pais in d.text])
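The one-liner packs selection, formatting, and filtering into a single comprehension; here is the same logic as an explicit loop for readability (continuing from the data and pais variables above):

for d in data:
    # Keep only the containers whose text mentions the requested name.
    if pais in d.text:
        name = d.find('span', {'class': 'name'}).text.strip()
        status = d.find('span', {'class': 'component-status'}).text.strip()
        print(name, '-', status)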
