UnicodeEncodeError: Scraping data using Python and beautifulsoup4
I am trying to scrape data from the PGA website to get a list of all the golf courses in the USA. I want to scrape the data and write it to a CSV file. My problem is that after running my script I get this error. Can anyone help me fix this error and explain how I can go about extracting the data?
Here is the error message:
File "/Users/AGB/Final_PGA2.py", line 44, in
writer.writerow(row)
UnicodeEncodeError: 'ascii' codec can't encode character u'\u201c' in
position 35: ordinal not in range(128)
Script below:
import csv
import requests
from bs4 import BeautifulSoup

courses_list = []

for i in range(906):  # Number of pages plus one
    url = "http://www.pga.com/golf-courses/search?page={}&searchbox=Course+Name&searchbox_zip=ZIP&distance=50&price_range=0&course_type=both&has_events=0".format(i)
    r = requests.get(url)
    soup = BeautifulSoup(r.content)
    g_data2 = soup.find_all("div", {"class": "views-field-nothing"})

    for item in g_data2:
        try:
            name = item.contents[1].find_all("div", {"class": "views-field-title"})[0].text
            print name
        except:
            name = ''
        try:
            address1 = item.contents[1].find_all("div", {"class": "views-field-address"})[0].text
        except:
            address1 = ''
        try:
            address2 = item.contents[1].find_all("div", {"class": "views-field-city-state-zip"})[0].text
        except:
            address2 = ''
        try:
            website = item.contents[1].find_all("div", {"class": "views-field-website"})[0].text
        except:
            website = ''
        try:
            Phonenumber = item.contents[1].find_all("div", {"class": "views-field-work-phone"})[0].text
        except:
            Phonenumber = ''

        course = [name, address1, address2, website, Phonenumber]
        courses_list.append(course)

with open('PGA_Final.csv', 'a') as file:
    writer = csv.writer(file)
    for row in courses_list:
        writer.writerow(row)
You should not get the error on Python 3. Here's a code example that fixes some unrelated issues in your code. It parses the specified fields on a given web page and saves them as CSV:
#!/usr/bin/env python3
import csv
from urllib.request import urlopen

import bs4  # $ pip install beautifulsoup4

page = 905
url = ("http://www.pga.com/golf-courses/search?page=" + str(page) +
       "&searchbox=Course+Name&searchbox_zip=ZIP&distance=50&price_range=0"
       "&course_type=both&has_events=0")

with urlopen(url) as response:
    field_content = bs4.SoupStrainer('div', 'views-field-nothing')
    soup = bs4.BeautifulSoup(response, parse_only=field_content)

fields = [bs4.SoupStrainer('div', 'views-field-' + suffix)
          for suffix in ['title', 'address', 'city-state-zip', 'website', 'work-phone']]

def get_text(tag, default=''):
    return tag.get_text().strip() if tag is not None else default

with open('pga.csv', 'w', newline='') as output_file:
    writer = csv.writer(output_file)
    for div in soup.find_all(field_content):
        writer.writerow([get_text(div.find(field)) for field in fields])
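The snippet above fetches a single results page. To cover every page the way the original script intends, the same fetch-and-write steps can go inside a loop over page numbers. A minimal sketch reusing the answer's helpers; the 906-page count comes from the question and may have changed on the live site:

#!/usr/bin/env python3
import csv
from urllib.request import urlopen

import bs4  # $ pip install beautifulsoup4

URL_TEMPLATE = ("http://www.pga.com/golf-courses/search?page={}"
                "&searchbox=Course+Name&searchbox_zip=ZIP&distance=50"
                "&price_range=0&course_type=both&has_events=0")
SUFFIXES = ['title', 'address', 'city-state-zip', 'website', 'work-phone']

def get_text(tag, default=''):
    # Return stripped text, or a default when the tag is missing.
    return tag.get_text().strip() if tag is not None else default

field_content = bs4.SoupStrainer('div', 'views-field-nothing')
fields = [bs4.SoupStrainer('div', 'views-field-' + suffix) for suffix in SUFFIXES]

with open('pga.csv', 'w', newline='') as output_file:
    writer = csv.writer(output_file)
    for page in range(906):  # page count taken from the question
        with urlopen(URL_TEMPLATE.format(page)) as response:
            soup = bs4.BeautifulSoup(response, 'html.parser',
                                     parse_only=field_content)
        for div in soup.find_all(field_content):
            writer.writerow([get_text(div.find(field)) for field in fields])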
with open('PGA_Final.csv', 'a') as file:
    writer = csv.writer(file)
    for row in courses_list:
        writer.writerow(row)
Change that to:
with open('PGA_Final.csv', 'a') as file:
    writer = csv.writer(file)
    for row in courses_list:
        writer.writerow([field.encode('utf-8') for field in row])
Or:
import codecs
....
with codecs.open('PGA_Final.csv', 'a', encoding='utf-8') as file:
    writer = csv.writer(file)
    for row in courses_list:
        writer.writerow(row)
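For completeness: on Python 2 the csv module works with byte strings, so rather than wrapping the file with codecs it is often simpler to encode each field just before writing. A minimal Python 2 sketch, assuming courses_list holds unicode strings as in the question:

import csv

def encode_row(row, encoding='utf-8'):
    # csv.writer in Python 2 expects byte strings, so encode each unicode
    # field; values that are already bytes are passed through unchanged.
    return [field.encode(encoding) if isinstance(field, unicode) else field
            for field in row]

with open('PGA_Final.csv', 'ab') as output_file:  # binary mode for Python 2 csv
    writer = csv.writer(output_file)
    for row in courses_list:
        writer.writerow(encode_row(row))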
Related
csv.writer not writing entire output to CSV file
I am attempting to scrape the artists' Spotify streaming rankings from Kworb.net into a CSV file and I've nearly succeeded, except I'm running into a weird issue. The code below successfully scrapes all 10,000 of the listed artists into the console:

import requests
from bs4 import BeautifulSoup
import csv

URL = "https://kworb.net/spotify/artists.html"
result = requests.get(URL)
src = result.content
soup = BeautifulSoup(src, 'html.parser')

table = soup.find('table', id="spotifyartistindex")

header_tags = table.find_all('th')
headers = [header.text.strip() for header in header_tags]

rows = []
data_rows = table.find_all('tr')

for row in data_rows:
    value = row.find_all('td')
    beautified_value = [dp.text.strip() for dp in value]
    print(beautified_value)
    if len(beautified_value) == 0:
        continue
    rows.append(beautified_value)

The issue arises when I use the following code to save the output to a CSV file:

with open('artist_rankings.csv', 'w', newline="") as output:
    writer = csv.writer(output)
    writer.writerow(headers)
    writer.writerows(rows)

For whatever reason, only 738 of the artists are saved to the file. Does anyone know what could be causing this? Thanks so much for any help!
As an alternative approach, you might want to make your life easier next time and use pandas. Here's how:

import requests
import pandas as pd

source = requests.get("https://kworb.net/spotify/artists.html")
df = pd.concat(pd.read_html(source.text, flavor="bs4"))
df.to_csv("artists.csv", index=False)

This outputs a .csv file with 10,000 artists.
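If the page ever carries more than one table, pd.read_html can also be restricted to the artist table by matching its id attribute (spotifyartistindex, per the question). A minimal sketch:

import requests
import pandas as pd

source = requests.get("https://kworb.net/spotify/artists.html")
# attrs limits read_html to tables whose HTML attributes match, so only
# the artist index table is parsed even if other tables are present.
tables = pd.read_html(source.text, flavor="bs4",
                      attrs={"id": "spotifyartistindex"})
tables[0].to_csv("artists.csv", index=False)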
Iterating over URLs loaded from a CSV in Python
Please help me. I have a CSV file of URLs (100 rows, 1 column) and I want to load and scrape every URL from line 1 to line 100 using Python. However, after running it, the loop only works for one of the lines; it does not reach the end of the URL list in the CSV and does not continue to the next URL. I also get this error when an element is missing from a page:

disc_information = html.find('div', class_='alert alert-info global-promo').text.strip().strip('\n')
AttributeError: 'NoneType' object has no attribute 'text'

How do I carry on when this error occurs because the element is not found? Here is the code I am using; please help me so that the scraping loop runs to the end of the URL list:

from bs4 import BeautifulSoup
import requests
import pandas as pd
import csv
import pandas

with open('Url Torch.csv','rt') as f:
    data = csv.reader(f, delimiter=',')
    for row in data:
        URL_GO = row[2]

def variable_Scrape(url):
    try:
        cookies = dict(cookie="............")
        request = requests.get(url, cookies=cookies)
        html = BeautifulSoup(request.content, 'html.parser')
        title = html.find('div', class_='title').text.strip().strip('\n')
        desc = html.find('div', class_='content').text
        link = html.find_all('img', class_='lazyload slide-item owl-lazy')
        normal_price = html.find('div', class_='amount public').text.strip().strip('\n')
        disc_information = html.find('div', class_='alert alert-info global-promo').text.strip().strip('\n')
    except AttributeError as e:
        print(e)
        #ConnectionAbortedError
        return False
    else:
        print(title)
        #print(desc)
        #print(link)
    finally:
        print(title)
        print(desc)
        print(link)
        print('Finally.....')

variable_Scrape(URL_GO)
It is hard to give you the exact answer without seeing your CSV file, but try this:

import csv

f = open('you_file.csv')
csv_f = csv.reader(f)

for row in csv_f:
    print(row[0])
This is the code:

import csv

data = []  # create an empty list to store rows on it
with open('emails.csv') as csv_file:
    reader = csv.reader(csv_file)
    for row in reader:
        data.append(row)  # add each row to the list

Based on your comments about passing a loop when the url is not ok:

for url in data:  # data is the list where urls are stored
    try:
        # do your code here (requests, beautifulsoup):
        # r = requests.get(url)
        ...
    except:
        pass  # will go to the next loop (next url) if an error happens
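Putting both pieces together, here is a minimal sketch of the whole loop; the file name, the column index, and the CSS classes are taken from the question and may need adjusting for your data:

import csv
import requests
from bs4 import BeautifulSoup

urls = []
with open('Url Torch.csv', 'rt') as f:
    for row in csv.reader(f, delimiter=','):
        urls.append(row[2])  # the question reads the URL from column index 2

for url in urls:
    try:
        response = requests.get(url)
        html = BeautifulSoup(response.content, 'html.parser')
        title = html.find('div', class_='title').text.strip()
        # find() returns None when an element is missing, and None.text raises
        # AttributeError; handling it lets the loop move on to the next URL.
        promo = html.find('div', class_='alert alert-info global-promo')
        disc_information = promo.text.strip() if promo is not None else ''
        print(title, disc_information)
    except (requests.RequestException, AttributeError) as e:
        print('Skipping {}: {}'.format(url, e))
        continue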
Python code scraping data from Kickstarter does not work after some iterations
I am trying to scrape data from Kickstarter. The code works, but it gives the following error on page 15 (you might get the error on a different page since the webpage is dynamic):

Traceback (most recent call last):
  File "C:\Users\lenovo\kick.py", line 30, in <module>
    csvwriter.writerow(row)
  File "C:\Users\lenovo\AppData\Local\Programs\Python\Python37\lib\encodings\cp1252.py", line 19, in encode
    return codecs.charmap_encode(input,self.errors,encoding_table)[0]
UnicodeEncodeError: 'charmap' codec can't encode character '\uff5c' in position 27: character maps to <undefined>

What might be the issue? Any suggestions?

from urllib.request import urlopen
from bs4 import BeautifulSoup
import json
import csv

KICKSTARTER_SEARCH_URL = "https://www.kickstarter.com/discover/advanced?category_id=16&sort=newest&seed=2502593&page={}"
DATA_FILE = "kickstarter.csv"

csvfile = open(DATA_FILE, 'w')
csvwriter = csv.writer(csvfile, delimiter=',')

page_start = 0
while True:
    url = KICKSTARTER_SEARCH_URL.format(page_start)
    print(url)
    response = urlopen(url)
    html = response.read()
    soup = BeautifulSoup(html, 'html.parser')
    project_details_divs = soup.findAll('div', {"class": "js-react-proj-card"})
    if len(project_details_divs) == 0:
        break
    for div in project_details_divs:
        project = json.loads(div['data-project'])
        row = [project["id"], project["name"], project["goal"], project["pledged"]]
        csvwriter.writerow(row)
    page_start += 1

csvfile.close()
Add the encoding argument to your file-opener. That is, change

csvfile = open(DATA_FILE, 'w')

into

csvfile = open(DATA_FILE, 'w', encoding='utf-8')

Better practice, though, is to use a context manager:

with open(DATA_FILE, 'w', encoding='utf-8') as csvfile:
    # ...
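A minimal sketch of the fixed setup; it also passes newline='', which the csv module recommends when writing files, while the rest of the script stays the same:

import csv

DATA_FILE = "kickstarter.csv"

# encoding='utf-8' lets writerow() handle characters such as '\uff5c' that the
# default cp1252 codec on Windows cannot encode; newline='' stops the csv
# module from inserting blank lines between rows on Windows.
with open(DATA_FILE, 'w', encoding='utf-8', newline='') as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',')
    csvwriter.writerow(["id", "name", "goal", "pledged"])  # example header row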
web scraping to download pdf file from web using python but getting blank file
I am trying to download a PDF report from the web using Python, however the code returns a blank PDF report at the end. May I know what is wrong with the code and where I am going wrong?

from BeautifulSoup import BeautifulSoup
import urllib2
import re

html_page = urllib2.urlopen("http://www.imd.gov.in/Welcome%20To%20IMD/Welcome.php")
soup = BeautifulSoup(html_page)
b = soup.findAll('a', attrs={'href': re.compile("^http://hydro.imd.gov.in/hydrometweb/")})
c = b[0]['href']
d = c[0:len(c)-12]
e = d + "PdfReportPage.aspx?ImgUrl=PRODUCTS/Rainfall_Statistics/Cumulative/District_RF_Distribution/DISTRICT_RAINFALL_DISTRIBUTION_COUNTRY_INDIA_cd.PDF"

def download_file(download_url):
    response = urllib2.urlopen(download_url)
    file = open("document.pdf", 'w')
    file.write(response.read())
    file.close()
    print("Completed")

download_file(e)
Use the binary mode b. Ex:

def download_file(download_url):
    response = urllib2.urlopen(download_url)
    with open("document.pdf", 'wb') as outfile:
        outfile.write(response.read())
    print("Completed")

download_file(e)
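For reference, the same download can be done on Python 3 with requests instead of urllib2. A minimal sketch, where download_url stands for the report URL assembled in the question:

import requests

def download_file(download_url, filename="document.pdf"):
    # stream=True fetches the PDF in chunks; 'wb' keeps the bytes intact.
    response = requests.get(download_url, stream=True)
    response.raise_for_status()
    with open(filename, 'wb') as outfile:
        for chunk in response.iter_content(chunk_size=8192):
            outfile.write(chunk)
    print("Completed")

# download_file(e)  # e is the report URL assembled in the question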
How do I create a CSV file with web-scraped content from several URLs?
I want to create a CSV file from web-scraped content. The content is from FinViz.com. I want to scrape the table from this website 20 times for 20 different stocks and write all of the content into a CSV file. Within my code, I generate a list of stocks from a scrape of Twitter content. The list of stocks that is generated is the same list that I want to get information on from the FinViz.com tables. Here is my code:

import csv
import urllib.request
from bs4 import BeautifulSoup

twiturl = "https://twitter.com/ACInvestorBlog"
twitpage = urllib.request.urlopen(twiturl)
soup = BeautifulSoup(twitpage, "html.parser")

print(soup.title.text)

tweets = [i.text for i in soup.select('a.twitter-cashtag.pretty-link.js-nav b')]
print(tweets)

url_base = "https://finviz.com/quote.ashx?t="
url_list = [url_base + tckr for tckr in tweets]

for url in url_list:
    fpage = urllib.request.urlopen(url)
    fsoup = BeautifulSoup(fpage, 'html.parser')

    # scrape single page and add data to list
    # write datalist
    with open('today.csv', 'a') as file:
        writer = csv.writer(file)
        # write header row
        writer.writerow(map(lambda e: e.text, fsoup.find_all('td', {'class': 'snapshot-td2-cp'})))
        # write body row
        writer.writerow(map(lambda e: e.text, fsoup.find_all('td', {'class': 'snapshot-td2'})))

The trouble that I am running into is that my CSV file only has the web-scraped data from the last item in the list. Instead, I want the entire list in a sequence of rows. Here is what my CSV file looks like:

Index,P/E,EPS (ttm),Insider Own,Shs Outstand,Perf Week,Market Cap,Forward P/E,EPS next Y,Insider Trans,Shs Float,Perf Month,Income,PEG,EPS next Q,Inst Own,Short Float,Perf Quarter,Sales,P/S,EPS this Y,Inst Trans,Short Ratio,Perf Half Y,Book/sh,P/B,EPS next Y,ROA,Target Price,Perf Year,Cash/sh,P/C,EPS next 5Y,ROE,52W Range,Perf YTD,Dividend,P/FCF,EPS past 5Y,ROI,52W High,Beta,Dividend %,Quick Ratio,Sales past 5Y,Gross Margin,52W Low,ATR,Employees,Current Ratio,Sales Q/Q,Oper. Margin,RSI (14),Volatility,Optionable,Debt/Eq,EPS Q/Q,Profit Margin,Rel Volume,Prev Close,Shortable,LT Debt/Eq,Earnings,Payout,Avg Volume,Price,Recom,SMA20,SMA50,SMA200,Volume,Change
-,-,-1.75,7.94%,79.06M,-22.52%,296.48M,-,-1.74,-4.61%,72.41M,-23.16%,-85.70M,-,-0.36,62.00%,3.21%,1.63%,15.10M,19.63,-197.00%,18.05%,2.57,66.67%,-0.65,-,-8.10%,-127.70%,12.17,-6.25%,0.93,4.03,-,146.70%,2.05 - 5.86,3.59%,-,-,-,385.80%,-36.01%,-,-,1.30,-,76.50%,82.93%,0.41,100,1.30,-59.60%,-,36.98,16.13% 9.32%,Yes,-,90.00%,-,0.82,3.63,Yes,-,Nov 08,-,902.43K,3.75,2.30,-22.08%,-10.43%,11.96%,"742,414",3.31%
It would be better to open your output file first, rather than keep opening/closing it for each URL that you fetch. Exception handling is needed to catch cases where a URL does not exist. Also, you should open the output file with newline='' to avoid extra empty lines being written to the file:

import csv
import urllib.request
from bs4 import BeautifulSoup

write_header = True

twiturl = "https://twitter.com/ACInvestorBlog"
twitpage = urllib.request.urlopen(twiturl)
soup = BeautifulSoup(twitpage, "html.parser")

print(soup.title.text)

tweets = [i.text for i in soup.select('a.twitter-cashtag.pretty-link.js-nav b')]
print(tweets)

url_base = "https://finviz.com/quote.ashx?t="
url_list = [url_base + tckr for tckr in tweets]

with open('today.csv', 'w', newline='') as file:
    writer = csv.writer(file)

    for url in url_list:
        try:
            fpage = urllib.request.urlopen(url)
            fsoup = BeautifulSoup(fpage, 'html.parser')

            # write header row (once)
            if write_header:
                writer.writerow(map(lambda e: e.text, fsoup.find_all('td', {'class': 'snapshot-td2-cp'})))
                write_header = False

            # write body row
            writer.writerow(map(lambda e: e.text, fsoup.find_all('td', {'class': 'snapshot-td2'})))
        except urllib.error.HTTPError:
            print("{} - not found".format(url))

So today.csv would start like:

Index,P/E,EPS (ttm),Insider Own,Shs Outstand,Perf Week,Market Cap,Forward P/E,EPS next Y,Insider Trans,Shs Float,Perf Month,Income,PEG,EPS next Q,Inst Own,Short Float,Perf Quarter,Sales,P/S,EPS this Y,Inst Trans,Short Ratio,Perf Half Y,Book/sh,P/B,EPS next Y,ROA,Target Price,Perf Year,Cash/sh,P/C,EPS next 5Y,ROE,52W Range,Perf YTD,Dividend,P/FCF,EPS past 5Y,ROI,52W High,Beta,Dividend %,Quick Ratio,Sales past 5Y,Gross Margin,52W Low,ATR,Employees,Current Ratio,Sales Q/Q,Oper. Margin,RSI (14),Volatility,Optionable,Debt/Eq,EPS Q/Q,Profit Margin,Rel Volume,Prev Close,Shortable,LT Debt/Eq,Earnings,Payout,Avg Volume,Price,Recom,SMA20,SMA50,SMA200,Volume,Change
-,-,-10.85,4.60%,2.36M,11.00%,8.09M,-,-,-62.38%,1.95M,-16.14%,-14.90M,-,-,2.30%,10.00%,-44.42%,0.00M,-,21.80%,-5.24%,3.10,-38.16%,1.46,2.35,-,-155.10%,65.00,-50.47%,-,-,-,-238.40%,2.91 - 11.20,-38.29%,-,-,54.50%,-,-69.37%,1.63,-,2.20,-,-,17.87%,0.36,15,2.20,-,-,39.83,11.38% 10.28%,No,0.00,68.70%,-,1.48,3.30,Yes,0.00,Feb 28 AMC,-,62.76K,3.43,1.00,-5.21%,-25.44%,-37.33%,"93,166",3.94%
-,-,-0.26,1.50%,268.98M,3.72%,2.25B,38.05,0.22,-0.64%,263.68M,-9.12%,-55.50M,-,0.05,-,9.96%,-12.26%,1.06B,2.12,-328.10%,25.95%,2.32,17.72%,12.61,0.66,650.00%,-0.90%,12.64,-38.73%,0.03,264.87,-,-1.90%,6.69 - 15.27,-0.48%,-,-,-28.70%,0.00%,-45.17%,2.20,-,0.70,16.40%,67.80%,25.11%,0.41,477,0.80,71.90%,5.30%,52.71,4.83% 5.00%,Yes,0.80,7.80%,-5.20%,0.96,7.78,Yes,0.80,Feb 27 AMC,-,11.31M,8.37,2.20,0.99%,-1.63%,-4.72%,"10,843,026",7.58%

If you only want your file to contain data from one run of the script, you do not need 'a' to append; just use 'w' instead.