Python scraper not writing correctly to csv - python

I'm new to Python and programming, and I'm having a bit of a problem with my project. I'm trying to scrape a website for data and save it in a CSV. It works, but when I write the "lst" list to the "Image URL" and "Image Featured" columns, the brackets "[" and "]" and the quote characters also get written to the CSV file. Is there a way to remove these? I know it's because the "lst" list contains other lists with the URLs.
import csv
from bs4 import BeautifulSoup
import requests
import pandas as pd
from datetime import date

today = date.today()

source = requests.get('https://www.meklarin.fo/').text
soup = BeautifulSoup(source, 'lxml')

df = pd.read_csv(r'C:\Users\username\Desktop\Kassin.fo\kassin\blog\management\commands\test.csv')
print(df.to_string())

original_house_title_list = []
original_house_link_list = []
house_titles_list = []
house_asking_price_list = []
house_current_bid_price_list = []
house_link_list = []
product = 'product'
current_date = today.strftime("%m.%d.%y")
house_image_list = []
house_location_list = []
lst = []
lst1 = []
house_info_list = []
house_final_info = []
list_convert = []

for house_link in soup.find_all('a', class_='house-air-content'):
    house_link = house_link.get('href')
    house_link_list.append(house_link.strip())
    print(house_link.strip())

for house_link in house_link_list:
    if house_link in original_house_link_list:
        continue
    else:
        source = requests.get(house_link).text
        soup = BeautifulSoup(source, 'lxml')

        for house_titles in soup.find_all('div', class_='ogn-base-info'):
            house_title = house_titles.h1.text
            house_titles_list.append(house_title)
            #print(house_title)

        for house__asking_price in soup.find_all('div', class_='col-xs-12 col-sm-12 col-md-6 house-ask-price house-price-column'):
            house_asking_price = house__asking_price.text
            house_asking_price = str(house_asking_price)
            house_asking_price = house_asking_price.removeprefix('Prísuppskotkr.')
            house_asking_price = house_asking_price.replace('.','')
            house_asking_price_list.append(house_asking_price.strip())
            #print(house_asking_price.strip())

        for house__current_bid_price in soup.find_all('div', class_='col-xs-12 col-sm-12 col-md-6 house-bid-price house-price-column'):
            house_current_bid_price = house__current_bid_price.h3.text
            house_current_bid_price = str(house_current_bid_price)
            house_current_bid_price = house_current_bid_price.replace('.','')
            house_current_bid_price = house_current_bid_price.replace('kr','')
            house_current_bid_price_list.append(house_current_bid_price.strip())
            print(house_current_bid_price.strip())

        for house_all_images in soup.find_all('a'):
            if 'https://www.meklarin.fo/wp-content/uploads' in str(house_all_images):
                house_all_images = house_all_images.get('href')
                house_image_list.append(house_all_images)
                #print(house_all_images)
            else:
                continue

        lst.append(house_image_list)
        lst1.append(lst)
        house_image_list = []

        for house_build_year in soup.find_all('div', class_='house-info-box-value'):
            if 'Trýst her' in str(house_build_year):
                continue
            else:
                print(house_build_year.text)

        for house_info in soup.find_all('div', class_='house-desc-comp'):
            house_info = house_info.text
            house_info = str(house_info)
            house_info = house_info.replace('Upplýsingar um bústaðin','')
            house_info_list.append(house_info)
            #print(house_info)

        house_final_info.append(house_info)
        house_info_list = []

dict = {'Title': house_titles_list, 'Content': house_final_info, 'Date': current_date, 'Post Type': product, 'Price': house_asking_price_list, 'Regular Price': house_asking_price_list, 'Sale Price': house_asking_price_list, 'Stock Status': 'instock', 'Image URL': lst, 'Image Title': house_titles_list, 'Image Featured': lst}
df = pd.DataFrame(dict)
df.to_csv('test.csv')
print(len(house_titles_list))
print(len(house_asking_price_list))
print(len(lst))
print(len(house_final_info))

To flatten the list stored in each cell of (for example) the Image URL column before writing to the file, try:
df['Image URL'] = [','.join(map(str, i)) for i in df['Image URL']]
The line above can be copied, and Image URL changed to Image Featured, to clean up the list in the other column as well.
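As a minimal sketch (assuming the nested lists contain URL strings), the cleanup applied to both columns just before the export would look like this:
# Flatten the list in each cell into a comma-separated string
df['Image URL'] = [','.join(map(str, urls)) for urls in df['Image URL']]
df['Image Featured'] = [','.join(map(str, urls)) for urls in df['Image Featured']]
df.to_csv('test.csv', index=False)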

Related

Trying to export data taken from HTML using beautiful soup

I'm trying to extract information from the HTML text that I get from URLs built in a for loop, using Beautiful Soup.
I manage to isolate the information correctly, but when I try to export the data I get the error message "All arrays must be of the same length".
weblink = []
filing_type = []
company_name = []
date = []

#Importing file
df = pd.read_csv('Downloads\Dropped_Companies.csv')

#Getting companie's names into list
companies_column = list(df.columns.values)[4]
name_ = df[companies_column].tolist()

#Formatting company's names for creating URLs
for CompanyName in name_:
    company_name.append(CompanyName.lower().replace(" ", '_'))
company_name

for item in range(0, len(company_name)):
    link = 'https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&company=' + company_name[item] + '&type=10-K&dateb=&owner=exclude&count=100'
    #Getting the HTML text
    headers = random.choice(headers_list)
    r = requests.Session()
    r.headers = headers
    html = r.get(link).text
    #Calling beautiful soup for better HTML text
    soup = bs.BeautifulSoup(html)
    tet_ = soup.find_all("a", id = "documentsbutton")
    #Get the links
    for link in tet_:
        weblink.append('https://www.sec.gov' + link.get('href'))
    test11 = soup.find_all("table", class_= "tableFile2")
    for link in test11:
        row = link.find_all("td", nowrap = "nowrap")
        for i in range(0, len(row), 3):
            filing_type.append(row[i].getText())
            date.append(link.find("td", class_ = "small").find_next_sibling("td").text)
            name.append(company_name[item])

data = {'Company Name': name, 'Filing Date': date, 'Filing Type': filing_type, "Weblink": weblink}
outputdf = pd.DataFrame(data)
outputdf.to_csv('Downloads/t_10KLinks.csv')
data = {'Company Name':name,'Filing Date': date,'Filing Type':filing_type,"Weblink":weblink}
outputdf = pd.DataFrame.from_dict(data, orient='index')
outputdf.to_csv('Downloads/t_10KLinks.csv')
See the pandas.DataFrame.from_dict documentation.
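As a rough sketch (not part of the original answer): with orient='index', each dictionary key becomes a row and shorter lists are padded with NaN, so the length error goes away; transposing afterwards restores the usual one-column-per-key layout.
data = {'Company Name': name, 'Filing Date': date, 'Filing Type': filing_type, "Weblink": weblink}
# Keys become rows; unequal list lengths are padded with NaN
outputdf = pd.DataFrame.from_dict(data, orient='index').transpose()
outputdf.to_csv('Downloads/t_10KLinks.csv', index=False)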

Multiple requests to Scrape different pages is giving the same result

I want to scrape the ICC cricket website and find the rankings of batsmen on a particular date. The problem is that the result I'm getting is the same for all dates: the scrape gives me the most recent rankings rather than the rankings on the requested date. The code is given below. Can someone tell me why this is happening, or suggest a solution?
I suspect the problem is that BeautifulSoup does not wait for the page to load completely, which gives stale information, since the data I need only appears after the filters are applied on the website.
from bs4 import BeautifulSoup
import pandas as pd
import requests
import re
import datetime
import os

date_list = pd.date_range(start = "1971-02-01", end=datetime.date.today(), freq='1d')

def get_batsmen(date):
    url = f'https://www.icc-cricket.com/rankings/mens/player-rankings/odi/batting?at={date}'
    page = requests.get(url).text
    doc = BeautifulSoup(page, "html.parser")
    find_class = doc.find_all("td", class_ = 'table-body__cell rankings-table__name name')
    player_list = []
    find_top = doc.find('div', class_='rankings-block__banner--name-large')
    player_list.append(find_top.text)
    for item in find_class:
        player_name = item.find("a")
        # print(player_name.text)
        player_list.append(player_name.text)
    df = pd.DataFrame(player_list, columns = ['Player Name'])
    return df

def get_bowler(date):
    url = f'https://www.icc-cricket.com/rankings/mens/player-rankings/odi/bowling?at={date}'
    page = requests.get(url).text
    doc = BeautifulSoup(page, "html.parser")
    find_class = doc.find_all("td", class_ = 'table-body__cell rankings-table__name name')
    player_list = []
    find_top = doc.find('div', class_='rankings-block__banner--name-large')
    player_list.append(find_top.text)
    for item in find_class:
        player_name = item.find("a")
        # print(player_name.text)
        player_list.append(player_name.text)
    df = pd.DataFrame(player_list, columns = ['Player Name'])
    return df

def get_allrounder(date):
    url = f'https://www.icc-cricket.com/rankings/mens/player-rankings/odi/all-rounder?at={date}'
    page = requests.get(url).text
    doc = BeautifulSoup(page, "html.parser")
    find_class = doc.find_all("td", class_ = 'table-body__cell rankings-table__name name')
    player_list = []
    find_top = doc.find('div', class_='rankings-block__banner--name-large')
    player_list.append(find_top.text)
    for item in find_class:
        player_name = item.find("a")
        # print(player_name.text)
        player_list.append(player_name.text)
    df = pd.DataFrame(player_list, columns = ['Player Name'])
    return df

#Storing the data into multiple csvs
for date in date_list:
    year = date.year
    month = date.month
    day = date.day
    newpath = rf'C:\Users\divya\OneDrive\Desktop\8th Sem\ISB assignment\{year}'
    if not os.path.exists(newpath):
        os.makedirs(newpath)
    newpath1 = rf'C:\Users\divya\OneDrive\Desktop\8th Sem\ISB assignment\{year}\{month}'
    if not os.path.exists(newpath1):
        os.makedirs(newpath1)
    newpath2 = rf'C:\Users\divya\OneDrive\Desktop\8th Sem\ISB assignment\{year}\{month}\{day}'
    if not os.path.exists(newpath2):
        os.makedirs(newpath2)
    get_batsmen(date).to_csv(newpath2 + '/batsmen.csv')
    get_bowler(date).to_csv(newpath2 + '/bowler.csv')
    get_allrounder(date).to_csv(newpath2 + '/allrounder.csv')
If the website you're scraping is interactive, it can be worth looking at Selenium as the scraping package instead of bs4, so that the page's JavaScript is actually executed.
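A minimal Selenium sketch of that idea (assuming Chrome and a matching chromedriver are installed; the CSS selector mirrors the class used in the code above and may need adjusting if the site changes):
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def get_rendered_soup(url):
    # Let the browser execute the page's JavaScript before parsing
    driver = webdriver.Chrome()
    try:
        driver.get(url)
        WebDriverWait(driver, 15).until(
            EC.presence_of_element_located(
                (By.CSS_SELECTOR, "td.table-body__cell.rankings-table__name.name")))
        return BeautifulSoup(driver.page_source, "html.parser")
    finally:
        driver.quit()

# Usage inside get_batsmen() and the other functions:
# doc = get_rendered_soup(url)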

How do I fix/prevent Data Overwriting Issue in Web Scrape Loop?

I was able to loop the web-scraping process, but the data collected from each page replaces the data from the page before, so the resulting file contains only the data from the last page. What do I need to do?
from bs4 import BeautifulSoup
import requests
import pandas as pd

print ('all imported successfuly')

for x in range(1, 44):
    link = (f'https://www.trustpilot.com/review/birchbox.com?page={x}')
    print (link)
    req = requests.get(link)
    content = req.content
    soup = BeautifulSoup(content, "lxml")
    names = soup.find_all('div', attrs={'class': 'consumer-information__name'})
    headers = soup.find_all('h2', attrs={'class':'review-content__title'})
    bodies = soup.find_all('p', attrs={'class':'review-content__text'})
    ratings = soup.find_all('div', attrs={'class':'star-rating star-rating--medium'})
    dates = soup.find_all('div', attrs={'class':'review-content-header__dates'})
    print ('pass1')
    df = pd.DataFrame({'User Name': names, 'Header': headers, 'Body': bodies, 'Rating': ratings, 'Date': dates})
    df.to_csv('birchbox006.csv', index=False, encoding='utf-8')
print ('excel done')
Because you are using a loop, the variables are constantly being overwritten. Normally, what you'd do in a situation like this is keep a list and append to it throughout the loop:
from bs4 import BeautifulSoup
import requests
import pandas as pd
import json

print ('all imported successfuly')

# Initialize an empty dataframe
df = pd.DataFrame()
for x in range(1, 44):
    names = []
    headers = []
    bodies = []
    ratings = []
    published = []
    updated = []
    reported = []

    link = (f'https://www.trustpilot.com/review/birchbox.com?page={x}')
    print (link)
    req = requests.get(link)
    content = req.content
    soup = BeautifulSoup(content, "lxml")
    articles = soup.find_all('article', {'class':'review'})
    for article in articles:
        names.append(article.find('div', attrs={'class': 'consumer-information__name'}).text.strip())
        headers.append(article.find('h2', attrs={'class':'review-content__title'}).text.strip())
        try:
            bodies.append(article.find('p', attrs={'class':'review-content__text'}).text.strip())
        except:
            bodies.append('')
        try:
            ratings.append(article.find('p', attrs={'class':'review-content__text'}).text.strip())
        except:
            ratings.append('')
        dateElements = article.find('div', attrs={'class':'review-content-header__dates'}).text.strip()
        jsonData = json.loads(dateElements)
        published.append(jsonData['publishedDate'])
        updated.append(jsonData['updatedDate'])
        reported.append(jsonData['reportedDate'])

    # Create your temporary dataframe of the first iteration, then append that into your "final" dataframe
    temp_df = pd.DataFrame({'User Name': names, 'Header': headers, 'Body': bodies, 'Rating': ratings, 'Published Date': published, 'Updated Date': updated, 'Reported Date': reported})
    df = df.append(temp_df, sort=False).reset_index(drop=True)
    print ('pass1')

df.to_csv('birchbox006.csv', index=False, encoding='utf-8')
print ('excel done')
The reason is that you are overwriting your variables in each iteration.
If you want to extend these variables instead, you can do, for example:
names = []
headers = []
bodies = []
ratings = []
dates = []

for x in range(1, 44):
    link = (f'https://www.trustpilot.com/review/birchbox.com?page={x}')
    print (link)
    req = requests.get(link)
    content = req.content
    soup = BeautifulSoup(content, "lxml")
    names += soup.find_all('div', attrs={'class': 'consumer-information__name'})
    headers += soup.find_all('h2', attrs={'class':'review-content__title'})
    bodies += soup.find_all('p', attrs={'class':'review-content__text'})
    ratings += soup.find_all('div', attrs={'class':'star-rating star-rating--medium'})
    dates += soup.find_all('div', attrs={'class':'review-content-header__dates'})
You'll have to store the data from each iteration somewhere. There are a few ways to do it: you can store everything in a list and then create your dataframe, or, as I did here, create a "temporary" dataframe in each iteration and append it to the final dataframe. Think of it like bailing water: you have a small bucket that you keep emptying into a large bucket, which collects all the water you are trying to gather.
from bs4 import BeautifulSoup
import requests
import pandas as pd
import json

print ('all imported successfuly')

# Initialize an empty dataframe
df = pd.DataFrame()
for x in range(1, 44):
    published = []
    updated = []
    reported = []

    link = (f'https://www.trustpilot.com/review/birchbox.com?page={x}')
    print (link)
    req = requests.get(link)
    content = req.content
    soup = BeautifulSoup(content, "lxml")
    names = [ x.text.strip() for x in soup.find_all('div', attrs={'class': 'consumer-information__name'})]
    headers = [ x.text.strip() for x in soup.find_all('h2', attrs={'class':'review-content__title'})]
    bodies = [ x.text.strip() for x in soup.find_all('p', attrs={'class':'review-content__text'})]
    ratings = [ x.text.strip() for x in soup.find_all('div', attrs={'class':'star-rating star-rating--medium'})]
    dateElements = soup.find_all('div', attrs={'class':'review-content-header__dates'})
    for date in dateElements:
        jsonData = json.loads(date.text.strip())
        published.append(jsonData['publishedDate'])
        updated.append(jsonData['updatedDate'])
        reported.append(jsonData['reportedDate'])

    # Create your temporary dataframe of the first iteration, then append that into your "final" dataframe
    temp_df = pd.DataFrame({'User Name': names, 'Header': headers, 'Body': bodies, 'Rating': ratings, 'Published Date': published, 'Updated Date': updated, 'Reported Date': reported})
    df = df.append(temp_df, sort=False).reset_index(drop=True)
    print ('pass1')

df.to_csv('birchbox006.csv', index=False, encoding='utf-8')
print ('excel done')
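Note that DataFrame.append was deprecated and later removed in newer pandas releases. On a recent pandas, the same pattern can be written by collecting the temporary frames in a list and concatenating them once after the loop; this is a sketch rather than part of the original answer, and frames is a hypothetical list name:
frames = []                  # before the loop
# frames.append(temp_df)     # inside the loop, instead of df.append(...)
df = pd.concat(frames, ignore_index=True, sort=False)   # after the loop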

Google Play Crawler results - save to CSV

I made a simple crawler that reads a CSV file of Google Play packages like com.viber.voip and goes to the full link like https://play.google.com/store/apps/details?id=com.viber.voip&hl=en.
It then crawls the title, publisher, downloads, etc., and stores them in a list.
The problem is that when I try to save the results to a CSV file, it throws an error if I export using pandas to_csv, or throws a UnicodeError when it finds some unknown characters. I tried adding .encode or .decode but it doesn't help. Can someone assist, please?
import bs4 as bs
import urllib.request
import pandas as pd
import csv

def searcher(bundles):
    html = urllib.request.urlopen(base_url+bundles+post_url).read()
    soup = bs.BeautifulSoup(html, 'html.parser')
    title_app = soup.title.get_text()
    publisher_name = soup.find('a', {'class':'document-subtitle primary'}).get_text()
    category = soup.find('a', {'class':'document-subtitle category'}).get_text()
    ratings = soup.find('meta', {'itemprop':'ratingValue'}).get('content')
    reviews = soup.find('span', {'class':'reviews-num'}).get_text()
    downloads = soup.find('div', {'itemprop':'numDownloads'}).get_text()
    updated_last_time = soup.find('div', {'class':'content'}).get_text()
    text = (bundles, title_app, publisher_name, category, ratings, reviews, downloads, updated_last_time)
    return (text)

def store(crawled_data):
    writer = csv.writer(f)
    labels = ['bundles', 'title_app', 'publisher_name', 'category', 'ratings', 'reviews', 'downloads', 'updated_last_time']
    writer.writerow(labels)
    df = pd.DataFrame(crawled_data)
    for row in df:
        if row != None:
            writer.writerow(row)

base_url = 'https://play.google.com/store/apps/details?id='
post_url = '&hl=en'
crawled_data = []
crawled_packages = 0

with open('links.csv', 'r') as f:
    df = pd.read_csv(f)
    urls = df['URLs']
    for bundles in urls:
        if bundles != None:
            aaa = searcher(bundles)
            print(crawled_packages)
            crawled_packages += 1
            if crawled_data != None:
                crawled_data.append(aaa)
store(crawled_data)
You can use to_csv() directly by specifying an output file. Also specify your column names while building the dataframe:
import bs4 as bs
import urllib.request
import pandas as pd
import csv

def searcher(bundles):
    html = urllib.request.urlopen(base_url+bundles+post_url).read()
    soup = bs.BeautifulSoup(html, 'html.parser')
    title_app = soup.title.get_text()
    publisher_name = soup.find('a', {'class':'document-subtitle primary'}).get_text(strip=True)
    category = soup.find('a', {'class':'document-subtitle category'}).get_text(strip=True)
    ratings = soup.find('meta', {'itemprop':'ratingValue'}).get('content')
    reviews = soup.find('span', {'class':'reviews-num'}).get_text(strip=True)
    downloads = soup.find('div', {'itemprop':'numDownloads'}).get_text(strip=True)
    updated_last_time = soup.find('div', {'class':'content'}).get_text(strip=True)
    return (bundles, title_app, publisher_name, category, ratings, reviews, downloads, updated_last_time)

def store(crawled_data):
    labels = ['bundles', 'title_app', 'publisher_name', 'category', 'ratings', 'reviews', 'downloads', 'updated_last_time']
    df = pd.DataFrame(crawled_data, columns=labels)
    df.to_csv('output.csv', index=False)

base_url = 'https://play.google.com/store/apps/details?id='
post_url = '&hl=en'
crawled_data = []
crawled_packages = 0

with open('links.csv', 'r') as f:
    df = pd.read_csv(f)
    urls = df['URLs']
    for bundles in urls:
        if bundles != None:
            aaa = searcher(bundles)
            print(crawled_packages)
            crawled_packages += 1
            if crawled_data != None:
                crawled_data.append(aaa)
store(crawled_data)
This will give you an output.csv file containing:
bundles,title_app,publisher_name,category,ratings,reviews,downloads,updated_last_time
com.viber.voip,Viber Messenger - Android Apps on Google Play,Viber Media S.à r.l.,Communication,4.336112022399902,"11,016,404","500,000,000 - 1,000,000,000","March 15, 2018"
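If the UnicodeError from the original attempt comes back (for example with names like "Viber Media S.à r.l."), passing an explicit encoding to to_csv is usually enough; utf-8-sig also keeps Excel happy. This is a general pandas option rather than something specific to this answer:
df.to_csv('output.csv', index=False, encoding='utf-8-sig')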

How can I make the output with pair of list : content in my python code?

I have been developing a Python web crawler for this website. I made two functions, each of which works well on its own.
One collects the list of stocks, and
the other collects the content data of each listing.
I would like the output of my code to be pairs of
"list#1/content#1",
"list#2/content#2",
"list#3/content#3",
What needs to be modified in my code to achieve this?
Thanks.
from bs4 import BeautifulSoup
import urllib.request

CAR_PAGE_TEMPLATE = "http://www.bobaedream.co.kr/cyber/CyberCar.php?gubun=I&page="
BASE_PAGE = 'http://www.bobaedream.co.kr'

def fetch_post_list():
    for i in range(20, 21):
        URL = CAR_PAGE_TEMPLATE + str(i)
        res = urllib.request.urlopen(URL)
        html = res.read()
        soup = BeautifulSoup(html, 'html.parser')
        table = soup.find('table', class_='cyber')
        #print ("Page#", i)

        # 50 lists per each page
        lists = table.find_all('tr', itemtype="http://schema.org/Article")
        count = 0
        for lst in lists:
            if lst.find_all('td')[3].find('em').text:
                lst_price = lst.find_all('td')[3].find('em').text
                lst_title = lst.find_all('td')[1].find('a').text
                lst_link = lst.find_all('td')[1].find('a')['href']
                lst_photo_url = ''
                if lst.find_all('td')[0].find('img'):
                    lst_photo_url = lst.find_all('td')[0].find('img')['src']
                count += 1
            else:
                continue
            #print('#', count, lst_title, lst_photo_url, lst_link, lst_price)
    return lst_link

def fetch_post_content(lst_link):
    URL = BASE_PAGE + lst_link
    res = urllib.request.urlopen(URL)
    html = res.read()
    soup = BeautifulSoup(html, 'html.parser')

    # Basic Information
    table = soup.find('div', class_='rightarea')

    # Number, Year, Mileage, Gas Type, Color, Accident
    content_table1 = table.find_all('div')[0]
    dds = content_table1.find_all('dd')
    for dd in dds:
        car_span_t = dd.find_all('span', {'class': 't'})[0]
        car_span_s = dd.find_all('span', {'class': 's'})[0]
        #print(car_span_t.text, ':', car_span_s.text)

    # Seller Information
    content_table2 = table.find_all('div')[1]
    dds2 = content_table2.find_all('dd')
    for dd2 in dds2:
        seller_span_t = dd.find_all('span', {'class': 't'})[0]
        seller_span_s = dd.find_all('span', {'class': 's'})[0]
        #print(seller_span_t.text, ':', seller_span_s.text)

    return dds
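One way to get the list/content pairing, as a sketch only: it assumes fetch_post_list() is changed to append every lst_link to a list and return that list instead of only the last link, and fetch_posts_with_content is a hypothetical wrapper name.
def fetch_posts_with_content():
    pairs = []
    for lst_link in fetch_post_list():   # assumes it now returns a list of links
        dds = fetch_post_content(lst_link)
        pairs.append((lst_link, [dd.get_text(strip=True) for dd in dds]))
    return pairs

for link, content in fetch_posts_with_content():
    print(link, ':', content)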
