How to implement a multiprocessing Pool in Python

In this part of my scraping code I fetch a lot of URLs stored in a url.xml file, and it takes very long to finish. How can I implement a multiprocessing Pool here?
Is there any simple code to fix this problem? Thanks
from bs4 import BeautifulSoup as soup
import requests
from multiprocessing import Pool

p = Pool(10)  # "10" means that 10 URLs will be processed at the same time
p.map

page_url = "url.xml"
out_filename = "prices.csv"
headers = "availableOffers,otherpricess,currentprice \n"

with open(out_filename, "w") as fw:
    fw.write(headers)
    with open("url.xml", "r") as fr:
        for url in map(lambda x: x.strip(), fr.readlines()):
            print(url)
            response = requests.get(url)
            page_soup = soup(response.text, "html.parser")
            availableOffers = page_soup.find("input", {"id": "availableOffers"})
            otherpricess = page_soup.find("span", {"class": "price"})
            currentprice = page_soup.find("div", {"class": "is"})
            fw.write(availableOffers + ", " + otherpricess + ", " + currentprice + "\n")

p.terminate()
p.join()

You can use the concurrent.futures standard package in Python for both multiprocessing and multi-threading.
In your case you don't need multiprocessing; multi-threading will help, because your function is I/O-bound (it mostly waits on network responses) rather than computationally expensive.
With multi-threading you can send multiple requests at the same time. The number_of_threads variable controls how many requests are sent at a time.
I have created a function, extract_data_from_url_func, that extracts the data from a single URL, and I pass this function and the list of URLs to a multi-threading executor from concurrent.futures.
from bs4 import BeautifulSoup as soup
from concurrent.futures import ThreadPoolExecutor
import requests

page_url = "url.xml"
number_of_threads = 6
out_filename = "prices.csv"
headers = "availableOffers,otherpricess,currentprice \n"

def extract_data_from_url_func(url):
    print(url)
    response = requests.get(url)
    page_soup = soup(response.text, "html.parser")
    availableOffers = page_soup.find("input", {"id": "availableOffers"})["value"]
    otherpricess = page_soup.find("span", {"class": "price"}).text.replace("$", "")
    currentprice = page_soup.find("div", {"class": "is"}).text.strip().replace("$", "")
    output_list = [availableOffers, otherpricess, currentprice]
    output = ",".join(output_list)
    print(output)
    return output

with open("url.xml", "r") as fr:
    URLS = list(map(lambda x: x.strip(), fr.readlines()))

with ThreadPoolExecutor(max_workers=number_of_threads) as executor:
    results = executor.map(extract_data_from_url_func, URLS)
    responses = []
    for result in results:
        responses.append(result)

with open(out_filename, "w") as fw:
    fw.write(headers)
    for response in responses:
        fw.write(response + "\n")  # newline so each URL gets its own row in the CSV
reference: https://docs.python.org/3/library/concurrent.futures.html
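Note (my addition, not part of the original answer): page_soup.find() returns None when an element is missing, so the attribute access above raises an AttributeError on pages that lack one of the fields. A minimal hedged variant of the same function that skips such pages could look like this:

def extract_data_from_url_func(url):
    response = requests.get(url)
    page_soup = soup(response.text, "html.parser")
    offers_tag = page_soup.find("input", {"id": "availableOffers"})
    price_tag = page_soup.find("span", {"class": "price"})
    current_tag = page_soup.find("div", {"class": "is"})
    if not (offers_tag and price_tag and current_tag):
        return ""  # element missing on this page; write an empty row instead of crashing
    availableOffers = offers_tag["value"]
    otherpricess = price_tag.text.replace("$", "")
    currentprice = current_tag.text.strip().replace("$", "")
    return ",".join([availableOffers, otherpricess, currentprice])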

It should be something of this form. Make sure that the urls being passed to p.map is a list of URLs:
from bs4 import BeautifulSoup as soup
import requests
from multiprocessing import Pool
import csv

def parse(url):
    response = requests.get(url)
    page_soup = soup(response.text, "html.parser")
    availableOffers = page_soup.find("input", {"id": "availableOffers"})["value"]
    otherpricess = page_soup.find("span", {"class": "price"}).text.replace("$", "")
    currentprice = page_soup.find("div", {"class": "is"}).text.strip().replace("$", "")
    return availableOffers, otherpricess, currentprice

if __name__ == '__main__':
    urls = [ ... ]  # List of urls to fetch from
    p = Pool(10)  # "10" means that 10 URLs will be processed at the same time
    records = p.map(parse, urls)
    p.terminate()
    p.join()

    with open("outfile.csv", "w") as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        for r in records:
            writer.writerow(r)
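For example, assuming url.xml contains one URL per line as in the question's code, the list could be built like this (an illustrative sketch, not part of the original answer):

with open("url.xml", "r") as fr:
    urls = [line.strip() for line in fr if line.strip()]  # one URL per non-empty line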


Handling HTML in python

I have a problem when I extract HTML tables and import them into Excel.
This is the site I need to get information from: https://www.kylc.com/stats/global/yearly_per_country/g_gdp/vnm.html
As you can see, in the GDP table the header row named 年份 is split across two lines,
which is why the exported Excel file gives unexpected results.
The result I want is that the first line in Excel only has: 年份, GDP(美元), 占世界%
Sorry for my confusing explanation, I really don't know how to explain it in more detail.
Here is my Python code:
import requests
from bs4 import BeautifulSoup
import lxml
import csv

def get_html(url):
    try:
        r = requests.get(url)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        r = "fail"
        return r

def getGDP(ulist, html):
    soup = BeautifulSoup(html, "html.parser")
    trs = soup.find_all('tr')
    for tr in trs:
        list = []
        for th in tr:
            ts = th.string
            if ts == '\n':
                continue
            list.append(ts)
        ulist.append(list)

def saveGDP(ulist):
    file_name = '21095010 胡碧玉 GDP.csv'
    with open(file_name, 'w', errors='ignore', newline='') as f:
        f_csv = csv.writer(f)
        f_csv.writerows(ulist)

def main():
    unifo = []
    url = 'https://www.kylc.com/stats/global/yearly_per_country/g_gdp/vnm.html'
    html = get_html(url)
    getGDP(unifo, html)
    saveGDP(unifo)

if __name__ == "__main__":
    main()
Thank you so much!
Using pandas to scrape tables and clean the results is in most cases much easier - under the hood BeautifulSoup is doing the work for you.
In this case read_html() the table, drop the unwanted header level and filter out the rows containing ads:
import pandas as pd
df = pd.read_html('https://www.kylc.com/stats/global/yearly_per_country/g_gdp/vnm.html')[0].droplevel(0, axis=1)
df[~df.iloc[:,0].str.contains('ads')].to_csv('21095010 胡碧玉 GDP.csv', index=False)
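To sanity check the result before saving it (an optional check, not part of the original answer), you can print the cleaned column names and the first rows:

import pandas as pd

df = pd.read_html('https://www.kylc.com/stats/global/yearly_per_country/g_gdp/vnm.html')[0].droplevel(0, axis=1)
print(df.columns.tolist())  # should show the headers from the question, e.g. 年份, GDP(美元), 占世界%
print(df.head())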
Answering your question
You have to select your elements more specifically, e.g. with CSS selectors.
So first get the thead information from all th without a colspan, then collect the data from all tr in tbody that do not contain ads:
def getGDP(html):
    soup = BeautifulSoup(html, "html.parser")
    data = []
    data.append([th.text for th in soup.select('thead th:not([colspan])')])
    for row in soup.select('tbody tr:not(:-soup-contains("ads"))'):
        data.append(list(row.stripped_strings))
    return data
Example
import requests
from bs4 import BeautifulSoup
import lxml
import csv

def get_html(url):
    try:
        r = requests.get(url)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        r = "fail"
        return r

def getGDP(html):
    soup = BeautifulSoup(html, "html.parser")
    data = []
    data.append([th.text for th in soup.select('thead th:not([colspan])')])
    for x in soup.select('tbody tr:not(:-soup-contains("ads"))'):
        data.append(list(x.stripped_strings))
    return data

def saveGDP(ulist):
    file_name = '21095010 胡碧玉 GDP.csv'
    print(ulist)
    # newline='' avoids blank lines between rows when the csv module writes the file
    with open(file_name, 'w', errors='ignore', encoding='utf-8', newline='') as f:
        f_csv = csv.writer(f)
        f_csv.writerows(ulist)

def main():
    url = 'https://www.kylc.com/stats/global/yearly_per_country/g_gdp/vnm.html'
    html = get_html(url)
    saveGDP(getGDP(html))

if __name__ == "__main__":
    main()

Multi threading not processing full list

I am using multi-threading to visit links read from a spreadsheet. Strangely, irrespective of max_workers, and even when I remove the multi-threading part, the code runs for an arbitrarily lower number of URLs than are in the list. I print the list to verify the count. For example, if the list has 5000 URLs the code stops at 4084; if there are 13,000 links it stops at 9200; even when there are just 130 links it stops at 80 or so. What am I doing wrong here?
import requests
import xlrd
import concurrent.futures
from bs4 import BeautifulSoup
import csv

header_added = False
file_location = "Urls.xlsx"
workbook = xlrd.open_workbook(file_location)
sheet = workbook.sheet_by_index(0)

all_links = []
for row in range(1, 11000):
    all_links.append(sheet.cell_value(row, 0))
print(len(all_links))

i = 0

def get_solution(url):
    global header_added, i
    page = requests.get(url).text
    soup = BeautifulSoup(page, 'html.parser')
    ques_div = soup.find('p', class_='header-description')
    ques = ques_div.find('span').text
    ans_divs = soup.findAll('div', class_='puzzle-solution')
    ans = ans_divs[0].text
    print("Solution ", i)
    i += 1
    dict1 = {"Words": ques, "Solution": ans}
    with open('Results10k.csv', 'a+', encoding='utf-8') as f:
        w = csv.DictWriter(f, dict1.keys())
        if not header_added:
            w.writeheader()
            header_added = True
        w.writerow(dict1)

with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    result = executor.map(get_solution, all_links)
Here's a reworking of your code that doesn't need locks – instead, there's only ever one process that writes to the file.
Also, due to the GIL, using a ThreadPool will be slower than a process-backed Pool.
import csv
import multiprocessing
import requests
import xlrd
from bs4 import BeautifulSoup

sess = requests.Session()

def get_solution(url):
    try:
        resp = sess.get(url)
        resp.raise_for_status()
        page = resp.text
        soup = BeautifulSoup(page, "html.parser")
        ques_div = soup.find("p", class_="header-description")
        ques = ques_div.find("span").text.strip()
        ans_divs = soup.findAll("div", class_="puzzle-solution")
        ans = ans_divs[0].text.strip()
        return {"URL": url, "Words": ques, "Solution": ans, "Error": ""}
    except Exception as exc:
        print(url, "Error:", exc)
        return {"URL": url, "Words": "", "Solution": "", "Error": str(exc)}

def read_links(file_location):
    workbook = xlrd.open_workbook(file_location)
    sheet = workbook.sheet_by_index(0)
    all_links = []
    for row in range(1, 11000):
        all_links.append(sheet.cell_value(row, 0))
    return all_links

def main():
    links = read_links("./Urls.xlsx")
    with open("Results10k.csv", "w", encoding="utf-8") as f:
        with multiprocessing.Pool() as p:  # (or multiprocessing.pool.ThreadPool)
            for i, result in enumerate(p.imap_unordered(get_solution, links, chunksize=16)):
                if i == 0:
                    writer = csv.DictWriter(f, result.keys())
                    writer.writeheader()
                writer.writerow(result)
                f.flush()  # Ensure changes are written immediately
                if i % 100 == 0:  # Progress indicator
                    print(i)

if __name__ == "__main__":
    main()
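If the bottleneck turns out to be the network rather than the HTML parsing, the same loop also works with the thread-backed pool mentioned in the comment above. A minimal sketch of that variant, reusing read_links() and get_solution() as defined here (the pool size of 8 is just an example):

from multiprocessing.pool import ThreadPool

def main_threaded():
    links = read_links("./Urls.xlsx")
    with open("Results10k.csv", "w", encoding="utf-8") as f:
        with ThreadPool(8) as p:  # threads instead of processes
            writer = None
            for i, result in enumerate(p.imap_unordered(get_solution, links, chunksize=16)):
                if writer is None:
                    writer = csv.DictWriter(f, result.keys())
                    writer.writeheader()
                writer.writerow(result)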
It could be that get_solution() crashes for some of the URLs. You could add a try/except in the body of the function and write all crashed URLs to a different file.
def get_solution(url):
    try:
        ...
    except:
        with open('errors.txt', 'a+') as f:
            f.write(url + '\n')
If this is the problem, the two counts should add up to the total number of links.
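For example, you could verify that with a quick count after the run (an illustrative sketch; it assumes the errors.txt file from the snippet above and the Results10k.csv and all_links names from the question):

with open('Results10k.csv', encoding='utf-8') as rf:
    ok_count = sum(1 for _ in rf) - 1  # minus the header row
with open('errors.txt') as ef:
    err_count = sum(1 for _ in ef)
print(ok_count + err_count, "of", len(all_links), "links accounted for")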
Also, writing to the file from multiple threads with open() is probably not thread safe; protect it with a lock:
import threading  # needed for the lock

file_lock = threading.Lock()

def get_solution(url):
    with file_lock:
        with open('Results10k.csv', 'a+', encoding='utf-8') as f:
            w = csv.DictWriter(f, dict1.keys())
            ...

In Python, getting only 1 result (the last result) for each URL instead of 60 results per URL

In Python I am getting only 1 result (the last result) for each URL instead of 60 results per URL with the code given below.
What should the code be after fixing it?
This is the Python code:
from bs4 import BeautifulSoup as soup
from concurrent.futures import ThreadPoolExecutor
import requests

page_url = "url.xml"
number_of_threads = 6
out_filename = "title.csv"
headers = "title,brand,category \n"

def extract_data_from_url_func(url):
    print(url)
    response = requests.get(url)
    page_soup = soup(response.text, "html.parser")
    containers = page_soup.findAll('div', {'class': 'column column-block block-grid-large single-item'})
    for container in containers:
        title = container['data-name'].replace(",", "|")
        brand = container['data-brand-name']
        category = container['data-category-name'].replace(",", "|")
        output_list = [title, brand, category]
        output = ",".join(output_list)
        print(output)
        return output

with open("url.xml", "r") as fr:
    URLS = list(map(lambda x: x.strip(), fr.readlines()))

with ThreadPoolExecutor(max_workers=number_of_threads) as executor:
    results = executor.map(extract_data_from_url_func, URLS)
    responses = []
    for result in results:
        responses.append(result)

with open(out_filename, "w", encoding='utf-8-sig') as fw:
    fw.write(headers)
    for response in responses:
        fw.write(response + "\n")
You should append to the output so that you get the cumulative result. You can format the output according to your requirements. Code below:
from bs4 import BeautifulSoup as soup
from concurrent.futures import ThreadPoolExecutor
import requests

page_url = "url.xml"
number_of_threads = 6
out_filename = "title.csv"
headers = "title,brand,category \n"

def extract_data_from_url_func(url):
    print(url)
    response = requests.get(url)
    page_soup = soup(response.text, "html.parser")
    containers = page_soup.findAll('div', {'class': 'column column-block block-grid-large single-item'})
    output = ''
    for container in containers:
        title = container['data-name'].replace(",", "|")
        brand = container['data-brand-name']
        category = container['data-category-name'].replace(",", "|")
        output_list = [title, brand, category]
        output = output + ",".join(output_list)
    print(output)
    return output

with open("url.xml", "r") as fr:
    URLS = list(map(lambda x: x.strip(), fr.readlines()))

with ThreadPoolExecutor(max_workers=number_of_threads) as executor:
    results = executor.map(extract_data_from_url_func, URLS)
    responses = []
    for result in results:
        responses.append(result)

with open(out_filename, "w", encoding='utf-8-sig') as fw:
    fw.write(headers)
    for response in responses:
        fw.write(response + "\n")
print(output) and return output have to be outside of the for loop.
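One caveat (my addition, not part of the original answer): concatenating the joined triplets directly puts all 60 results for a URL on a single CSV line. If each item should get its own row, one way is to collect the rows in a list and join them with newlines, for example:

def extract_data_from_url_func(url):
    response = requests.get(url)
    page_soup = soup(response.text, "html.parser")
    containers = page_soup.findAll('div', {'class': 'column column-block block-grid-large single-item'})
    rows = []
    for container in containers:
        title = container['data-name'].replace(",", "|")
        brand = container['data-brand-name']
        category = container['data-category-name'].replace(",", "|")
        rows.append(",".join([title, brand, category]))  # one CSV row per item
    return "\n".join(rows)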

Web scraping in Python, empty output

I need some help with the following code:
import csv
import requests
from bs4 import BeautifulSoup
import datetime

filename = "imob_" + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M") + ".csv"
with open(filename, "w+") as f:
    writer = csv.writer(f)
    writer.writerow(["Localizare", "Pret", "Data"])
    for i in range(1, 100):
        r = requests.get("https://www.imobiliare.ro/inchirieri-case-vile/brasov?pagina=" + format(i))
        soup = BeautifulSoup(r.text, "html.parser")
        array_price = soup.find_all('div', class_='pret')
        array_desc = soup.find_all('h2', class_='titlu-anunt hidden-xs', text=True)
        for iterator in range(0, len(array_price)):
            localizare = array_desc[iterator].text.strip()
            pret = array_price[iterator].text.strip()
            writer.writerow([localizare, pret, datetime.datetime.now()])
The output is empty. Can someone give me an advice, please? Thank you.
You had a couple of issues:
First, as stated in the comments, the class price does not exist. You could use pret, but it's easier to use soup.find_all('span', class_="pret-mare").
Second, array_desc = soup.find_all('h2', class_='titlu-anunt hidden-xs', text=True) returns an empty list. I removed text=True and it started working.
import csv
import requests
from bs4 import BeautifulSoup
import datetime

filename = "imob_" + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M") + ".csv"
with open(filename, "w+") as f:
    writer = csv.writer(f)
    writer.writerow(["Localizare", "Pret", "Data"])
    for i in range(1, 100):
        r = requests.get("https://www.imobiliare.ro/inchirieri-case-vile/brasov?pagina=" + format(i))
        soup = BeautifulSoup(r.text, "html.parser")
        array_price = soup.find_all('span', class_="pret-mare")
        array_desc = soup.find_all('h2', class_='titlu-anunt hidden-xs')
        for iterator in range(0, len(array_price)):
            localizare = array_desc[iterator].text.strip()
            pret = array_price[iterator].text.strip()
            writer.writerow([localizare, pret, datetime.datetime.now()])
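As a small additional safeguard (my suggestion, not part of the original answer), pairing the two lists with zip() avoids an IndexError if a page ever returns a different number of prices and titles; the inner loop could then be written as:

        for desc, price in zip(array_desc, array_price):
            localizare = desc.text.strip()
            pret = price.text.strip()
            writer.writerow([localizare, pret, datetime.datetime.now()])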

Python script - grouping words into an if-not statement

Trying to figure out how to use an if not statement in which I can group three to four words to omit from a CSV file. Towards the bottom of the code, you'll see that I'm stuck at: if ('reddit', 'passwords') not in x:
Any help would be great.
# import libraries
import bs4
from urllib2 import urlopen as uReq
from bs4 import BeautifulSoup as soup

my_url = 'https://www.reddit.com/r/NHLStreams/comments/71uhwi/game_thread_sabres_at_maple_leafs_730_pm_et/'

# opening up connection, grabbing the page
uClient = uReq(my_url)
page_html = uClient.read()
uClient.close()

# html parsing
page_soup = soup(page_html, "html.parser")

filename = "sportstreams.csv"
f = open(filename, "w")
headers = "Sport Links " + "\n"
f.write(headers)

links = page_soup.select("form a[href]")
for link in links:
    href = link["href"]
    print(href)
    f.write(href + "\n")

with open('sportstreams.csv') as f, open('sstream.csv', "w") as f2:
    for x in f:
        if ('reddit', 'passwords') not in x:  # trying to find multi words to omit
            f2.write(x.strip() + '\n')
Use the builtin function all:
if all(t not in x for t in ('reddit', 'passwords')):
Or any:
if not any(t in x for t in ('reddit', 'passwords')):
Here it is in your context manager:
with open('sportstreams.csv') as f, open('sstream.csv', "w") as f2:
    for line in f:
        if any(t in line for t in ('reddit', 'passwords')):
            # The line contains one of the strings.
            continue
        else:
            # The line contains none of the strings.
            f2.write(line.strip() + '\n')
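For completeness, the same filter written with the all() form from the first snippet (equivalent behavior, just without the continue):

with open('sportstreams.csv') as f, open('sstream.csv', "w") as f2:
    for line in f:
        if all(t not in line for t in ('reddit', 'passwords')):
            # Keep only lines that contain none of the strings.
            f2.write(line.strip() + '\n')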
