I am trying to write scraped data to a CSV file using a pandas DataFrame, but the CSV is empty even after the program finishes. The headers are written first, but they get overwritten once the DataFrame comes into play.
Here is the code:
from bs4 import BeautifulSoup
import requests
import re as resju
import csv
import pandas as pd
re = requests.get('https://www.farfeshplus.com/Video.asp?ZoneID=297')
soup = BeautifulSoup(re.content, 'html.parser')
links = soup.findAll('a', {'class': 'opacityit'})
links_with_text = [a['href'] for a in links]
headers = ['Name', 'LINK']
# this is the output file; you can change the path as desired, default is the working directory
file = open('data123.csv', 'w', encoding="utf-8")
writer = csv.writer(file)
writer.writerow(headers)
for i in links_with_text:
    new_re = requests.get(i)
    new_soup = BeautifulSoup(new_re.content, 'html.parser')
    m = new_soup.select_one('h1 div')
    Name = m.text
    print(Name)
    n = new_soup.select_one('iframe')
    ni = n['src']
    iframe = requests.get(ni)
    i_soup = BeautifulSoup(iframe.content, 'html.parser')
    d_script = i_soup.select_one('body > script')
    d_link = d_script.text
    mp4 = resju.compile(r"(?<=mp4:\s\[\')(.*)\'\]")
    final_link = mp4.findall(d_link)[0]
    print(final_link)
    df = pd.DataFrame(zip(Name, final_link))
    df.to_csv(file, header=None, index=False)
file.close()
df.head() returns:
   0  1
0  ل  h
1  ي  t
2  ل  t
3  ى  p
4     s
   0  1
0  ل  h
1  ي  t
2  ل  t
3  ى  p
4     s
Any suggestions?
It seems you are using a mix of libraries to write to a CSV. pandas handles all of this nicely, so there is no need to use Python's built-in csv module.
I've modified your code below; this should build your DataFrame as a whole and write it out as a CSV in one go.
Also, by passing header=None you were setting the columns to nothing, so they would only be referenced by an index number.
from bs4 import BeautifulSoup
import requests
import re as resju
#import csv
import pandas as pd

re = requests.get('https://www.farfeshplus.com/Video.asp?ZoneID=297')
soup = BeautifulSoup(re.content, 'html.parser')

links = soup.findAll('a', {'class': 'opacityit'})
links_with_text = [a['href'] for a in links]

names_ = []  # global list to hold all iterable variables from your loops
final_links_ = []

for i in links_with_text:
    new_re = requests.get(i)
    new_soup = BeautifulSoup(new_re.content, 'html.parser')
    m = new_soup.select_one('h1 div')
    Name = m.text
    names_.append(Name)  # append to global list.
    print(Name)
    n = new_soup.select_one('iframe')
    ni = n['src']
    iframe = requests.get(ni)
    i_soup = BeautifulSoup(iframe.content, 'html.parser')
    d_script = i_soup.select_one('body > script')
    d_link = d_script.text
    mp4 = resju.compile(r"(?<=mp4:\s\[\')(.*)\'\]")
    final_link = mp4.findall(d_link)[0]
    print(final_link)
    final_links_.append(final_link)  # append to global list.

df = pd.DataFrame(zip(names_, final_links_))  # use global lists.
df.columns = ['Name', 'LINK']
df.to_csv('data123.csv', index=False)  # write to the file path directly instead of an open file handle
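As a side note (my explanation, not part of the original answer): the single-character rows in your df.head() output come from calling zip() on two strings, which pairs them character by character; collecting whole values into lists first, as above, is what fixes the shape. A quick illustration:

# zip() over two strings pairs them character by character,
# which is why each row of the original DataFrame held single characters
print(list(zip('Name', 'https://...')))
# -> [('N', 'h'), ('a', 't'), ('m', 't'), ('e', 'p')]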
I need to set up a table as follows
date
time
Agua 6 Horas
Agua Diario
(QFE)
Radiación
Humedad
Viento promedio
Viento Temperatura del Aire Seco
This is the station link; in table 6 are the items of interest that fill each of the columns of the table I intend to assemble.
Each desired element has an index in this table; the ones that matter are 57, 60, 64, 89, 66, 28 and 26.
I need to get the data from this table and fill each of the columns of the table I want to build with it.
When accessing an element of the table, the data is grouped in another table.
That's how I thought of getting the data; I just can't gather it into a single table. If anyone can help me, I would be grateful.
import csv
import re
import requests
from bs4 import BeautifulSoup
import pandas as pd
from lxml import html
from urllib.parse import urljoin
# filename = "data.csv"
# f = open(filename, "w", newline ='')
# data = csv.writer(f)
pages = [
    'https://climatologia.meteochile.gob.cl/application/informacion/inventarioComponentesPorEstacion/330006/57/119',
    'https://climatologia.meteochile.gob.cl/application/informacion/inventarioComponentesPorEstacion/330006/60/125'
    # 'https://climatologia.meteochile.gob.cl/application/informacion/inventarioComponentesPorEstacion/330006/64/130',
    # 'https://climatologia.meteochile.gob.cl/application/informacion/inventarioComponentesPorEstacion/330006/89/200',
    # 'https://climatologia.meteochile.gob.cl/application/informacion/inventarioComponentesPorEstacion/330006/66/132',
    # 'https://climatologia.meteochile.gob.cl/application/informacion/inventarioComponentesPorEstacion/330007/28/61',
    # 'https://climatologia.meteochile.gob.cl/application/informacion/inventarioComponentesPorEstacion/330007/28/60',
    # 'https://climatologia.meteochile.gob.cl/application/informacion/inventarioComponentesPorEstacion/330006/26/58'
]
def make_soup(url):
    res = requests.get(url)
    soup = BeautifulSoup(res.text, 'lxml')
    return soup

def get_links(url):
    soup = make_soup(url)
    a_tags = soup.find_all('a', href=re.compile(r"^/application/informacion/datosMensualesDelElemento/330006/"))
    links = [urljoin(url, a.get('href')) for a in a_tags]
    return links

def get_data(link):
    soup = make_soup(link)
    table = soup.find('table', {'class': 'table table-striped table-bordered'})
    data = [[' '.join(item.text.split()) for item in tcel.select("td")]
            for tcel in table.select("tr")]
    return data
if __name__ == '__main__':
    # data = []
    # dataframe = []
    # df = pd.DataFrame()
    # for page in pages:
    #     make_soup(page)
    #     links = get_links(page)
    #     # print(links)
    #     for link in links:
    #         data.append(get_data(link))
    #         break
    #     dataframe.append(pd.DataFrame(data))
    # df = pd.concat(dataframe, axis=0)
    # print(df)
    # make_soup(url)
    # links = get_links(url)
    # for link in links:
    #     data.append(get_data(link))
    #     print(data)
    #     break
    # df = pd.DataFrame(data)
    # df.to_csv('data.csv', index=False)
    # df = pd.DataFrame(data)
    # print(df)

    columns_name = ['Data Medicao', 'Hora Medicao', 'RRR6 (mm)', 'Traza ()']
    data = []
    # for i in range(len(data)+1):
    #     print(data[i])
    # df = pd.DataFrame(data)

    for page in pages:
        links = get_links(pages[0])
        for link in links:
            data.append(get_data(link))

    df = pd.DataFrame(data)
    df = df.iloc[1:]
    df.to_csv('data.csv', index=False)
    print(df)

    # links = get_links(url)
    # for link in links:
    #     data.writerows(get_data(link))
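A minimal, untested sketch of one way to gather the per-link tables into a single DataFrame (my suggestion, reusing the get_links and get_data functions defined above; it assumes the rows returned for each link line up as columns of the same width):

# collect one DataFrame per link, then concatenate them into a single table
frames = []
for page in pages:
    for link in get_links(page):
        frames.append(pd.DataFrame(get_data(link)))

combined = pd.concat(frames, ignore_index=True)
combined.to_csv('data.csv', index=False)
print(combined)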
I want to put the data I'm scraping from the website into a CSV file. My first attempt was with Scrapy, but I couldn't get the syntax right. I managed to do it with BeautifulSoup, but I don't know how to put the results into a CSV file.
import requests
from bs4 import BeautifulSoup

URL = "https://www.practo.com/tests/glycosylated-haemoglobin-blood/p?city=delhi"
page = requests.get(URL)
#print(page)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.find(id='root-app')
#print(results.prettify())
job_elems = results.find_all('div', class_='u-padrl--std')
#<h1 class="o-font-size--24 u-font-bold u-marginb--std">HbA1c Test</h1>

for job_elem in job_elems:
    title_elem = job_elem.find('h1', class_='o-font-size--24 u-font-bold u-marginb--std')
    also_known = job_elem.find('span', class_="u-font-bold")
    cost = job_elem.find('div', class_="o-font-size--22 u-font-bold o-f-color--primary")
    what_test = job_elem.find('div', class_="c-markdown--unstyled")
    #test_prep = job_elem.find('div', class_="c-tabsection__content c-pp__accordion-item__content active")
    #temp = job_elem.find('p')
    print(title_elem.text)
    print(also_known.text)
    print(cost.text)
    print(what_test.text)
    #print(temp.text)
    #print(test_prep.text)
    print()

text_content = results.find_all('div', class_='c-markdown--unstyled')
# c-tabsection__content c-pp__accordion-item__content active
# c-tabsection c-pp__accordion-item u-br-rule u-padtb--std--half active
for item in text_content:
    prep = item.find('p')
    #, class_="c-tabsection__content c-pp__accordion-item__content active")
    print(prep.text)
    print('xxo')
import requests
from bs4 import BeautifulSoup
# import the csv module
import csv

URL = "https://www.practo.com/tests/glycosylated-haemoglobin-blood/p?city=delhi"
page = requests.get(URL)
#print(page)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.find(id='root-app')
#print(results.prettify())
job_elems = results.find_all('div', class_='u-padrl--std')
#<h1 class="o-font-size--24 u-font-bold u-marginb--std">HbA1c Test</h1>

rows = []
fields = ['title_elem', 'also_known', 'cost', 'what_test']
filename = "myfile.csv"

for job_elem in job_elems:
    title_elem = job_elem.find('h1', class_='o-font-size--24 u-font-bold u-marginb--std').text
    also_known = job_elem.find('span', class_="u-font-bold").text
    cost = job_elem.find('div', class_="o-font-size--22 u-font-bold o-f-color--primary").text
    what_test = job_elem.find('div', class_="c-markdown--unstyled").text
    row = [title_elem, also_known, cost, what_test]
    rows.append(row)

# open with encoding="utf-8" (instead of encoding each value to bytes)
# and newline="" to avoid blank lines between rows on Windows
with open(filename, 'w', encoding="utf-8", newline="") as csvfile:
    # creating a csv writer object
    csvwriter = csv.writer(csvfile)
    # writing the fields
    csvwriter.writerow(fields)
    # writing the data rows
    csvwriter.writerows(rows)

text_content = results.find_all('div', class_='c-markdown--unstyled')
# c-tabsection__content c-pp__accordion-item__content active
# c-tabsection c-pp__accordion-item u-br-rule u-padtb--std--half active
for item in text_content:
    prep = item.find('p')
    #, class_="c-tabsection__content c-pp__accordion-item__content active")
    print(prep.text)
    print('xxo')
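A shorter alternative sketch, if you prefer pandas over the csv module (it reuses the rows and fields lists built above):

import pandas as pd

# build a DataFrame from the scraped rows and write it out in one call
df = pd.DataFrame(rows, columns=fields)
df.to_csv("myfile.csv", index=False, encoding="utf-8")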
You can use the xlsxwriter library.
import xlsxwriter

workbook = xlsxwriter.Workbook("file.xlsx")
worksheet = workbook.add_worksheet()
# row and column are zero-indexed cell coordinates, element is the value to write
worksheet.write(row, column, element)
workbook.close()
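For instance, a rough sketch of how the rows and fields lists from the code above could be written out with xlsxwriter (assuming each row is a list of plain strings):

import xlsxwriter

workbook = xlsxwriter.Workbook("file.xlsx")
worksheet = workbook.add_worksheet()
worksheet.write_row(0, 0, fields)            # header row
for row_idx, row in enumerate(rows, start=1):
    worksheet.write_row(row_idx, 0, row)     # one scraped record per row
workbook.close()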
How can I export a DataFrame to Excel without overwriting the previous data?
For example:
I'm doing web scraping on a table with pagination, so I take page 1, save it in a DataFrame, export it to Excel, and do the same again on page 2. But every record is erased each time I save, and only the last one remains.
Sorry for my English, here is my code:
import time
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver

i = 1
url = "https://stats.nba.com/players/traditional/?PerMode=Totals&Season=2019-20&SeasonType=Regular%20Season&sort=PLAYER_NAME&dir=-1"
driver = webdriver.Firefox(executable_path=r'C:/Users/Fabio\Desktop/robo/geckodriver.exe')
driver.get(url)
time.sleep(5)
driver.find_element_by_xpath("/html/body/main/div[2]/div/div[2]/div/div/nba-stat-table/div[2]/div[1]/table/thead/tr/th[9]").click()
contador = 1

#loop pagination
while(contador < 4):
    #finding the table
    elemento = driver.find_element_by_xpath("/html/body/main/div[2]/div/div[2]/div/div/nba-stat-table/div[2]")
    html_content = elemento.get_attribute('outerHTML')
    # 2. Parse HTML - BeautifulSoup
    soup = BeautifulSoup(html_content, 'html.parser')
    table = soup.find(name='table')
    # 3. Data Frame - Pandas
    df_full = pd.read_html(str(table))[0]
    df = df_full[['PLAYER', 'TEAM', 'PTS']]
    df.columns = ['jogador', 'time', 'pontuacao']
    dados1 = pd.DataFrame(df)
    driver.find_element_by_xpath("/html/body/main/div[2]/div/div[2]/div/div/nba-stat-table/div[1]/div/div/a[2]").click()
    contador = contador + 1

#4. export to excel
dados = pd.DataFrame(df)
dados.to_excel("fabinho.xlsx")
driver.quit()
You are re-assigning df to whatever data you retrieved every time you go through the loop. A solution is to append the data to a list and then pd.concat the list at the end.
import time
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver

i = 1
url = "https://stats.nba.com/players/traditional/?PerMode=Totals&Season=2019-20&SeasonType=Regular%20Season&sort=PLAYER_NAME&dir=-1"
driver = webdriver.Firefox(executable_path=r'C:/Users/Fabio\Desktop/robo/geckodriver.exe')
driver.get(url)
time.sleep(5)
driver.find_element_by_xpath("/html/body/main/div[2]/div/div[2]/div/div/nba-stat-table/div[2]/div[1]/table/thead/tr/th[9]").click()
contador = 1
df_list = list()

#loop pagination
while(contador < 4):
    #finding the table
    elemento = driver.find_element_by_xpath("/html/body/main/div[2]/div/div[2]/div/div/nba-stat-table/div[2]")
    html_content = elemento.get_attribute('outerHTML')
    # 2. Parse HTML - BeautifulSoup
    soup = BeautifulSoup(html_content, 'html.parser')
    table = soup.find(name='table')
    # 3. Data Frame - Pandas
    df_full = pd.read_html(str(table))[0]
    df = df_full[['PLAYER', 'TEAM', 'PTS']]
    df.columns = ['jogador', 'time', 'pontuacao']
    df_list.append(df)
    driver.find_element_by_xpath("/html/body/main/div[2]/div/div[2]/div/div/nba-stat-table/div[1]/div/div/a[2]").click()
    contador = contador + 1

#4. export to excel
dados = pd.concat(df_list)
dados.to_excel("fabinho.xlsx")
driver.quit()
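Optionally (my addition, not required by the fix above), ignore_index=True resets the row numbers so they don't repeat from page to page, and index=False keeps them out of the spreadsheet:

dados = pd.concat(df_list, ignore_index=True)
dados.to_excel("fabinho.xlsx", index=False)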
My current code is cutting the first 6 characters from file names while downloading PDFs. For example, a PDF's file name is 123456acII.pdf (https://example.com/wp-content/uploads/2016/11/123456acII.pdf), but the file saved in the folder is acII.pdf.
How do I keep the names as they are?
import os
import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup

main = "https://example.com/"

#If there is no such folder, the script will create one automatically
folder_location = r'C:\temp\webscraping'
if not os.path.exists(folder_location): os.mkdir(folder_location)

def Get_Links():
    r = requests.get(main).text
    soup = BeautifulSoup(r, 'html.parser')
    links = []
    for item in soup.findAll("div", {'class': 'large-4 medium-4 columns'}):
        for n in item.find_all('a'):
            print('Link: ' + n.get('href'))
            links.append(n.get('href'))
    return links

def Parse_Links():
    pdf = set()
    for url in Get_Links():
        r = requests.get(url).text
        soup = BeautifulSoup(r, 'html.parser')
        for item in soup.findAll("div", {'class': 'large-6 medium-8 columns large-centered'}):
            for link in item.findAll("a"):
                link = link.get("href")
                if link:
                    pdf.add(link)
    return pdf

def Save():
    for item in Parse_Links():
        print(f"Downloading File: {item[55:]}")
        filename = os.path.join(folder_location, f"{item[55:]}")
        r = requests.get(item)
        with open(filename, 'wb') as f:
            f.write(r.content)
    print("done")

Save()
It looks like you are slicing the string starting at index position 55 with {item[55:]}. Try to see if it's simply a matter of starting your slice 6 positions earlier:
Change it to: {item[49:]}
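If the prefix length ever varies between links, a slice-free alternative (my suggestion, not part of the original answer) is to take the last path segment of the URL instead of a hard-coded offset:

# keep everything after the final '/', e.g. 123456acII.pdf
filename = os.path.join(folder_location, item.split("/")[-1])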
import openpyxl
xl_file = openpyxl.Workbook()
xl_sheet = xl_file.active

from urllib.request import urlopen
from bs4 import BeautifulSoup

stockItem = '028300'
url = 'http://finance.naver.com/item/sise_day.nhn?code=' + stockItem
html = urlopen(url)
source = BeautifulSoup(html.read(), "html.parser")

maxPage = source.find_all("table", align="center")
mp = maxPage[0].find_all("td", class_="pgRR")
mpNum = int(mp[0].a.get('href')[-3:])

for page in range(1, 10):
    print(str(page))
    url = 'http://finance.naver.com/item/sise_day.nhn?code=' + stockItem + '&page=' + str(page)
    html = urlopen(url)
    source = BeautifulSoup(html.read(), "html.parser")
    srlists = source.find_all("tr")
    isCheckNone = None

    if((page % 1) == 0):
        time.sleep(0)

    for i in range(1, len(srlists)-1):
        if(srlists[i].span != isCheckNone):
            srlists[i].td.text
            data1 = srlists[i].find_all("td", align="center")
            data2 = srlists[i].find_all("td", class_="num")
            print(srlists[i].find_all("td", align="center")[0].text, srlists[i].find_all("td", class_="num")[0].text)
            for item in data1:
                xl_sheet.append([item.get_text()])
This is what I've done to crawl stock data from the site.
I've successfully crawled the stock data.
However, I couldn't save the data into an Excel file.
I tried, but it only showed the date data without the price data.
How can I convert the results to an Excel file?
There were two things you missed:
1) A mistake in importing packages (time is used but was never imported).
2) data2, which contains the prices, was never appended to the Excel sheet.
Here is the final code, which will give your desired output. Just put in your folder location for saving the Excel file.
import time
from openpyxl import Workbook
xl_file = Workbook()
xl_sheet = xl_file.active

from urllib.request import urlopen
from bs4 import BeautifulSoup

i = 0
stockItem = '028300'
url = 'http://finance.naver.com/item/sise_day.nhn?code=' + stockItem
html = urlopen(url)
source = BeautifulSoup(html.read(), "html.parser")

maxPage = source.find_all("table", align="center")
mp = maxPage[0].find_all("td", class_="pgRR")
mpNum = int(mp[0].a.get('href')[-3:])

for page in range(1, 10):
    print(str(page))
    url = 'http://finance.naver.com/item/sise_day.nhn?code=' + stockItem + '&page=' + str(page)
    html = urlopen(url)
    source = BeautifulSoup(html.read(), "html.parser")
    srlists = source.find_all("tr")
    isCheckNone = None

    if((page % 1) == 0):
        time.sleep(0)

    for i in range(1, len(srlists)-1):
        if(srlists[i].span != isCheckNone):
            srlists[i].td.text
            data1 = srlists[i].find_all("td", align="center")
            data2 = srlists[i].find_all("td", class_="num")
            #print(srlists[i].find_all("td", align="center")[0].text, srlists[i].find_all("td", class_="num")[0].text)
            for item1, item2 in zip(data1, data2):
                # append the date (item1) and the price (item2) as one row
                xl_sheet.append([item1.get_text(), item2.get_text()])

print(xl_sheet)
xl_file.save(r'C:\Users\Asus\Desktop\vi.xlsx')
Suggestion: you can use the yfinance (Yahoo Finance) package for Python to download stock data easily.
You can follow this link >> https://pypi.org/project/yfinance/
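For instance, a minimal sketch with yfinance (the Yahoo ticker symbol '028300.KQ' and the date range are my assumptions, so double-check the symbol for this stock):

import yfinance as yf

# the '.KQ' (KOSDAQ) suffix is an assumption; adjust to the correct Yahoo symbol
data = yf.download('028300.KQ', start='2020-01-01', end='2020-06-30')
data.to_excel('stock_028300.xlsx')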