I am trying to scrape data from a SharePoint URL and save that data to a local Excel file.
The file saves, but it doesn't contain the data from the online Excel spreadsheet.
import requests

url = 'https://crokepark-my.sharepoint.com/:x:/r/personal/ruairi_harvey_gaa_ie/_layouts/15/Doc.aspx?guestaccesstoken=Gc2myfwceMcTJO0Sm78dGMt4Up6MH9VlzlUxxsMV%2Fgk%3D&docid=04bc452cba06b4bfea0d1ed80a2b5fac6&action=default&cid=f2e59e9a-71fd-4fd4-aaa6-8e3a4e88fe40'
response = requests.get(url)

with open('file.html', 'wb') as f:
    f.write(response.content)
from bs4 import BeautifulSoup

# Open the HTML file
with open("file.html", "r") as file:
    html_content = file.read()

# Use BeautifulSoup to parse the HTML
soup = BeautifulSoup(html_content, 'html.parser')

# Find the table in the HTML using its attributes (e.g. class, id)
table = soup.find('table', attrs={'class': 'table-class-name'})

# Extract the table headers
headers = [header.text for header in table.find_all('th')]

# Extract the table data rows
rows = []
for row in table.find_all('tr'):
    rows.append([cell.text for cell in row.find_all('td')])

# Print the table headers and data
print(headers)
print(rows)
The error says it can't find the header in the table.
After inspecting the soup I don't see any of the data from the Excel file.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_4532/3438091026.py in <module>
1 # Extract the table headers
----> 2 headers = [header.text for header in table.find_all('th')]
3
4 # Extract the table data rows
5 rows = []
AttributeError: 'NoneType' object has no attribute 'find_all'
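The root cause is that the saved file contains only the SharePoint viewer shell; the spreadsheet cells are rendered client-side by JavaScript, so the HTML fetched by requests has no table for BeautifulSoup to find. One possible workaround, sketched below, is to fetch the workbook itself rather than the viewer page and read it with pandas. This assumes the guest link permits downloading (the action=download switch is an assumption, not verified against this link) and that openpyxl is installed.

import requests
import pandas as pd
from io import BytesIO

url = 'https://crokepark-my.sharepoint.com/...'  # the guest-access URL from above
# Ask SharePoint for the raw .xlsx instead of the HTML viewer
# (assumes the share link allows downloads).
download_url = url.replace('action=default', 'action=download')
response = requests.get(download_url)
response.raise_for_status()

# Parse the workbook bytes and save a local copy with the data intact.
df = pd.read_excel(BytesIO(response.content))
df.to_excel('file.xlsx', index=False)
print(df.head())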
I want to retrieve a financial dataset from a website that has a login. I've managed to log in using requests and access the HTML.
import requests
from bs4 import BeautifulSoup
import pandas as pd

s = requests.session()
login_data = dict(email='my login', password='password')
s.post('*portal website with /login*', data=login_data)
r = s.get('*website with financial page*')
print(r.content)

# work on r as it's a direct link to the stock page
soup = BeautifulSoup(r.text, 'html.parser')  # parses the HTML of the finance page
The above code allows me to log in and get the HTML from the correct page.
table = soup.find('table')  # locate the financials table (selector assumed)

headers = []
# finds all the headers
for i in table.find_all('th'):
    title = i.text.strip()
    headers.append(title)

df = pd.DataFrame(columns=headers)
print(df)
This block finds the table and gets the column headers, which are printed as:
Columns: [Date, Type, Type, Credit, Debit, Outstanding, Case File, ]
The next part is the problem. When I attempt to retrieve the financials using the following code:
for row in table.find_all('tr')[1:]:
    data = row.find_all('td')
    row_data = [td.text.strip() for td in data]
    print(row_data)
It returns this:
['"Loading Please Wait..."']
The HTML of the site looks like this: [screenshot of the HTML of the site I want to scrape, not reproduced here]
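The "Loading Please Wait..." row means the table body is filled in by JavaScript after the page loads, so requests only ever sees the placeholder row. Below is a minimal sketch of one way around it, using Selenium to wait for the rendered rows; the URL placeholder and the CSS selector are assumptions, and the login step is omitted:

from io import StringIO

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd

driver = webdriver.Chrome()
driver.get('*website with financial page*')  # placeholder URL as above
# Wait up to 15 seconds for at least one data cell to render.
WebDriverWait(driver, 15).until(
    EC.presence_of_element_located((By.CSS_SELECTOR, 'table td'))
)
# pandas parses every <table> in the rendered source (requires lxml).
tables = pd.read_html(StringIO(driver.page_source))
driver.quit()
print(tables[0])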
I have been working on web scraping the infobox information on Wikipedia. This is the code that I have been using:
import requests
import csv
from bs4 import BeautifulSoup

URL = ['https://en.wikipedia.org/wiki/Workers_Credit_Union',
       'https://en.wikipedia.org/wiki/San_Diego_County_Credit_Union',
       'https://en.wikipedia.org/wiki/USA_Federal_Credit_Union',
       'https://en.wikipedia.org/wiki/Commonwealth_Credit_Union',
       'https://en.wikipedia.org/wiki/Center_for_Community_Self-Help',
       'https://en.wikipedia.org/wiki/ESL_Federal_Credit_Union',
       'https://en.wikipedia.org/wiki/State_Employees_Credit_Union',
       'https://en.wikipedia.org/wiki/United_Heritage_Credit_Union']

for url in URL:
    headers = []
    rows = []
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    table = soup.find('table', class_='infobox')
    credit_union_name = soup.find('h1', id="firstHeading")
    header_tags = table.find_all('th')
    headers = [header.text.strip() for header in header_tags]
    data_rows = table.find_all('tr')
    for row in data_rows:
        value = row.find_all('td')
        beautified_value = [dp.text.strip() for dp in value]
        if len(beautified_value) == 0:
            continue
        rows.append(beautified_value)
    rows.append("")
    rows.append([credit_union_name.text.strip()])
    rows.append([url])
    with open(r'credit_unions.csv', 'a+', newline="") as output:
        writer = csv.writer(output)
        writer.writerow(headers)
        writer.writerow(rows)
However, when I check the CSV file, the information is not presented in tabular form. The scraped elements are stored as nested lists instead of a single flat list. I need the scraped information for each URL to be stored in a single list and written to the CSV file in tabular form under the headings. Need help regarding this.
The infoboxes have different structures and labels. So I think the best way to solve this is to use dicts and a DictWriter.
import requests
import csv
from bs4 import BeautifulSoup

URL = ['https://en.wikipedia.org/wiki/Workers_Credit_Union',
       'https://en.wikipedia.org/wiki/San_Diego_County_Credit_Union',
       'https://en.wikipedia.org/wiki/USA_Federal_Credit_Union',
       'https://en.wikipedia.org/wiki/Commonwealth_Credit_Union',
       'https://en.wikipedia.org/wiki/Center_for_Community_Self-Help',
       'https://en.wikipedia.org/wiki/ESL_Federal_Credit_Union',
       'https://en.wikipedia.org/wiki/State_Employees_Credit_Union',
       'https://en.wikipedia.org/wiki/United_Heritage_Credit_Union']

csv_headers = set()
csv_rows = []

for url in URL:
    csv_row = {}
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    credit_union_name = soup.find('h1', id="firstHeading")
    table = soup.find('table', class_='infobox')
    data_rows = table.find_all('tr')
    for data_row in data_rows:
        label = data_row.find('th')
        value = data_row.find('td')
        if label is None or value is None:
            continue
        beautified_label = label.text.strip()
        beautified_value = value.text.strip()
        csv_row[beautified_label] = beautified_value
        csv_headers.add(beautified_label)
    csv_row["name"] = credit_union_name.text.strip()
    csv_row["url"] = url
    csv_rows.append(csv_row)

with open(r'credit_unions.csv', 'a+', newline="") as output:
    headers = ["name", "url"]
    headers += sorted(csv_headers)
    writer = csv.DictWriter(output, fieldnames=headers)
    writer.writeheader()
    writer.writerows(csv_rows)
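A detail worth knowing about csv.DictWriter here: any fieldname missing from a given row's dict is written as the empty string (the default restval), which is exactly what lets infoboxes with different label sets share one header row.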
I am trying to scrape a web page using Python and BeautifulSoup. When I write:
table = soup.find('table')
it returns None.
And when I try to get the row contents, it always returns an empty list.
I also used Selenium, with the same result: an empty list.
import requests
from bs4 import BeautifulSoup
import csv

url = "https://www.iea.org/data-and-statistics/data-tables?country=CANADA&energy=Balances&year=2010"
response = requests.get(url)
print(response.status_code)  # prints 200

soup = BeautifulSoup(response.text, "html.parser")
tr = soup.find_all('tr', attrs={'class': 'm-data-table__row '})
print(tr)       # prints []
print(len(tr))  # prints 0

csvFile = open("C:/Users/User/Desktop/test27.csv", 'wt', newline='', encoding='utf-8')
writer = csv.writer(csvFile)
try:
    for cell in tr:
        td = cell.find_all('td')
        row = [i.text.replace('\n', '') for i in td]
        writer.writerow(row)
finally:
    csvFile.close()
Any help?
When you analyse the website, you can see that the data is loaded via an AJAX call. The following script makes that call and saves the required JSON to a file:
import requests
import json

res = requests.get("https://api.iea.org/stats/?year=2010&countries=CANADA&series=BALANCES")
data = res.json()

with open("data.json", "w") as f:
    json.dump(data, f)
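Since the original goal was a CSV rather than JSON, here is a possible follow-up, assuming the endpoint returns a flat list of records (the exact response shape is not verified here):

import pandas as pd

# Flatten the list of JSON records into a table and write it out;
# the resulting column names depend on the API's actual field names.
df = pd.json_normalize(data)
df.to_csv("C:/Users/User/Desktop/test27.csv", index=False)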
I have a CSV file with 45k+ rows, each containing a different path on the same domain; the pages are structurally identical to each other, and every one is reachable. I managed to use BeautifulSoup to scrape the title and content of each one, and through the print function I was able to validate the scraper. However, when I try to export the gathered information to a new CSV file, I only get the last URL's street name and description, not all of them as I expected.
from bs4 import BeautifulSoup
import requests
import csv

with open('URLs.csv') as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        site = requests.get(row['addresses']).text
        soup = BeautifulSoup(site, 'lxml')
        StreetName = soup.find('div', class_='hist-title').text
        Description = soup.find('div', class_='hist-content').text

with open('OutputList.csv', 'w', newline='') as output:
    Header = ['StreetName', 'Description']
    writer = csv.DictWriter(output, fieldnames=Header)
    writer.writeheader()
    writer.writerow({'StreetName': StreetName, 'Description': Description})
How can I make the output CSV contain, on each row, the street name and description for the corresponding URL row in the input CSV file?
You need to open both files on the same level and then read and write on each iteration. Something like this:
from bs4 import BeautifulSoup
import requests
import csv

with open('URLs.csv') as a, open('OutputList.csv', 'w', newline='') as b:
    reader = csv.reader(a)
    writer = csv.writer(b, quoting=csv.QUOTE_ALL)
    writer.writerow(['StreetName', 'Description'])
    next(reader, None)  # skip the header row of URLs.csv
    # Assuming url is the first field in the CSV
    for url, *_ in reader:
        r = requests.get(url)
        if r.ok:
            soup = BeautifulSoup(r.text, 'lxml')
            street_name = soup.find('div', class_='hist-title').text.strip()
            description = soup.find('div', class_='hist-content').text.strip()
            writer.writerow([street_name, description])
I hope it helps.
I want to scrape the data from this website and store it in a CSV file in tabular form.
But when I try to scrape the data, it is not stored in the expected format: all of it ends up in the first column. I have no idea how to approach this problem.
Link: https://pce.ac.in/students/bachelors-students/
Code:
import csv  # file operations
from bs4 import BeautifulSoup as soup  # lib for pulling data from html/xml sites
from urllib.request import urlopen as uReq  # lib for sending and receiving info over http

Url = 'https://pce.ac.in/students/bachelors-students/'
pageHtml = uReq(Url)
soup = soup(pageHtml, "html.parser")  # parse the html

table = soup.find_all("table", {"class": "tablepress tablepress-id-10 tablepress-responsive-phone"})
f = csv.writer(open('BEPillaiDepart.csv', 'w'))
f.writerow(['Choice Code', 'Course Name', 'Year of Establishment', 'Sanctioned Strength'])  # headers

for x in table:
    data = ""
    table_body = x.find('tbody')  # find tbody tag
    rows = table_body.find_all('tr')  # find all tr tags
    for tr in rows:
        cols = tr.find_all('td')  # find all td tags
        for td in cols:
            data = data + "\n" + td.text.strip()
    f.writerow([data])
    # print(data)
Create the data variable inside each tr loop; you can try it like this:
import csv  # file operations
from bs4 import BeautifulSoup as soup  # lib for pulling data from html/xml sites
from urllib.request import urlopen as uReq  # lib for sending and receiving info over http

Url = 'https://pce.ac.in/students/bachelors-students/'
pageHtml = uReq(Url)
soup = soup(pageHtml, "html.parser")  # parse the html

table = soup.find_all("table", {"class": "tablepress tablepress-id-10 tablepress-responsive-phone"})

with open('BEPillaiDepart.csv', 'w', newline='') as csvfile:
    f = csv.writer(csvfile)
    f.writerow(['Choice Code', 'Course Name', 'Year of Establishment', 'Sanctioned Strength'])  # headers
    for x in table:
        table_body = x.find('tbody')  # find tbody tag
        rows = table_body.find_all('tr')  # find all tr tags
        for tr in rows:
            data = []
            cols = tr.find_all('td')  # find all td tags
            for td in cols:
                data.append(td.text.strip())
            f.writerow(data)
            print(data)
If you look up the meaning of CSV, you'll find it stands for comma-separated values; however, the original code never produced separate values, since it concatenated every cell into one newline-joined string before appending it to the file.
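As an aside, a shorter route is to let pandas parse the table directly; this is a sketch that assumes the page serves a plain HTML table and that lxml is installed:

import pandas as pd

# read_html returns a list of DataFrames, one per <table> on the page.
tables = pd.read_html('https://pce.ac.in/students/bachelors-students/')
tables[0].to_csv('BEPillaiDepart.csv', index=False)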