I am using multi-threading to visit links read from a CSV. Strangely, irrespective of max_workers, and even when I remove the multi-threading part entirely, the code processes an arbitrarily smaller number of URLs than are in the list. I print the list to verify the count. For example, if the list has 5000 URLs the code stops at 4084; if it has 13,000 links it stops at 9200; even when it is just 130 links it stops at around 80. What am I doing wrong here?
import requests
import xlrd
import concurrent.futures
from bs4 import BeautifulSoup
import csv

header_added = False

file_location = "Urls.xlsx"
workbook = xlrd.open_workbook(file_location)
sheet = workbook.sheet_by_index(0)

all_links = []
for row in range(1, 11000):
    all_links.append(sheet.cell_value(row, 0))
print(len(all_links))

i = 0

def get_solution(url):
    global header_added, i
    page = requests.get(url).text
    soup = BeautifulSoup(page, 'html.parser')
    ques_div = soup.find('p', class_='header-description')
    ques = ques_div.find('span').text
    ans_divs = soup.findAll('div', class_='puzzle-solution')
    ans = ans_divs[0].text
    print("Solution ", i)
    i += 1
    dict1 = {"Words": ques, "Solution": ans}
    with open('Results10k.csv', 'a+', encoding='utf-8') as f:
        w = csv.DictWriter(f, dict1.keys())
        if not header_added:
            w.writeheader()
            header_added = True
        w.writerow(dict1)

with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    result = executor.map(get_solution, all_links)
Here's a reworking of your code that doesn't need locks – instead, there's only ever one process that writes to the file.
Also, due to the GIL, using a ThreadPool will be slower than a process-backed Pool.
import csv
import multiprocessing

import requests
import xlrd
from bs4 import BeautifulSoup

sess = requests.Session()

def get_solution(url):
    try:
        resp = sess.get(url)
        resp.raise_for_status()
        page = resp.text
        soup = BeautifulSoup(page, "html.parser")
        ques_div = soup.find("p", class_="header-description")
        ques = ques_div.find("span").text.strip()
        ans_divs = soup.findAll("div", class_="puzzle-solution")
        ans = ans_divs[0].text.strip()
        return {"URL": url, "Words": ques, "Solution": ans, "Error": ""}
    except Exception as exc:
        print(url, "Error:", exc)
        return {"URL": url, "Words": "", "Solution": "", "Error": str(exc)}

def read_links(file_location):
    workbook = xlrd.open_workbook(file_location)
    sheet = workbook.sheet_by_index(0)
    all_links = []
    for row in range(1, 11000):
        all_links.append(sheet.cell_value(row, 0))
    return all_links

def main():
    links = read_links("./Urls.xlsx")
    with open("Results10k.csv", "w", encoding="utf-8") as f:
        with multiprocessing.Pool() as p:  # (or multiprocessing.pool.ThreadPool)
            for i, result in enumerate(p.imap_unordered(get_solution, links, chunksize=16)):
                if i == 0:
                    writer = csv.DictWriter(f, result.keys())
                    writer.writeheader()
                writer.writerow(result)
                f.flush()  # Ensure changes are written immediately
                if i % 100 == 0:  # Progress indicator
                    print(i)

if __name__ == "__main__":
    main()
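If you want to confirm afterwards that every input URL produced a row, a quick count over the output is enough to spot a mismatch. This is a minimal sketch, assuming the Results10k.csv layout written by the script above (including its "Error" column):

import csv

with open("Results10k.csv", encoding="utf-8") as f:
    rows = list(csv.DictReader(f))

print("rows written:", len(rows))
print("rows with errors:", sum(1 for r in rows if r["Error"]))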
It could be that get_solution() crashes for some of the URLs. You could add a try/except in the body of the function and write all crashed URLs to a different file.
def get_solution(url):
    try:
        ...
    except:
        with open('errors.txt', 'a+') as f:
            f.write(url + '\n')
If this is the problem, the counts in the two files should add up to the total number of URLs.
Also, writing to the same file from multiple threads via open() is probably not thread-safe, so guard it with a lock:
file_lock = threading.Lock()

def get_solution(url):
    with file_lock:
        with open('Results10k.csv', 'a+', encoding='utf-8') as f:
            w = csv.DictWriter(f, dict1.keys())
            ...
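Putting both suggestions together, here is a minimal sketch of get_solution() with the error file and the lock. It assumes the requests, BeautifulSoup and csv imports from the question; the parsing lines are copied from there:

import threading

file_lock = threading.Lock()
header_added = False

def get_solution(url):
    global header_added
    try:
        page = requests.get(url).text
        soup = BeautifulSoup(page, 'html.parser')
        ques = soup.find('p', class_='header-description').find('span').text
        ans = soup.findAll('div', class_='puzzle-solution')[0].text
    except Exception:
        # record the failing URL instead of silently dropping it
        with file_lock:
            with open('errors.txt', 'a+') as f:
                f.write(url + '\n')
        return
    row = {"Words": ques, "Solution": ans}
    with file_lock:
        with open('Results10k.csv', 'a+', encoding='utf-8') as f:
            w = csv.DictWriter(f, row.keys())
            if not header_added:
                w.writeheader()
                header_added = True
            w.writerow(row)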
I had a problem when I extracted HTML tables and imported them into Excel.
This is the site I need to get the information from: https://www.kylc.com/stats/global/yearly_per_country/g_gdp/vnm.html
As you can see, in the GDP table the header row containing 年份 is split across two lines.
That's why, after I exported the Excel file, it gave unexpected results.
The result I want is that the first line in the output only has: 年份, GDP(美元), 占世界%
Sorry for my confusing explanation, I really don't know how to explain it in more detail.
Here is my Python code:
import requests
from bs4 import BeautifulSoup
import lxml
import csv

def get_html(url):
    try:
        r = requests.get(url)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        r = "fail"
        return r

def getGDP(ulist, html):
    soup = BeautifulSoup(html, "html.parser")
    trs = soup.find_all('tr')
    for tr in trs:
        list = []
        for th in tr:
            ts = th.string
            if ts == '\n':
                continue
            list.append(ts)
        ulist.append(list)

def saveGDP(ulist):
    file_name = '21095010 胡碧玉 GDP.csv'
    with open(file_name, 'w', errors='ignore', newline='') as f:
        f_csv = csv.writer(f)
        f_csv.writerows(ulist)

def main():
    unifo = []
    url = 'https://www.kylc.com/stats/global/yearly_per_country/g_gdp/vnm.html'
    html = get_html(url)
    getGDP(unifo, html)
    saveGDP(unifo)

if __name__ == "__main__":
    main()
Thank you so much!
Using pandas to scrape tables and clean the results is much easier in most cases - under the hood, BeautifulSoup is working for you.
In this case, read_html() the table, drop the unwanted header level, and filter out the rows containing ads:
import pandas as pd
df = pd.read_html('https://www.kylc.com/stats/global/yearly_per_country/g_gdp/vnm.html')[0].droplevel(0, axis=1)
df[~df.iloc[:,0].str.contains('ads')].to_csv('21095010 胡碧玉 GDP.csv', index=False)
Answering your question
You have to select your elements more specifically, e.g. with CSS selectors.
So first get the thead information from all th without a colspan, then collect the data from all tr in tbody that do not contain ads:
def getGDP(html):
    soup = BeautifulSoup(html, "html.parser")
    data = []
    data.append([th.text for th in soup.select('thead th:not([colspan])')])
    for row in soup.select('tbody tr:not(:-soup-contains("ads"))'):
        data.append(list(row.stripped_strings))
    return data
Example
import requests
from bs4 import BeautifulSoup
import lxml
import csv

def get_html(url):
    try:
        r = requests.get(url)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        r = "fail"
        return r

def getGDP(html):
    soup = BeautifulSoup(html, "html.parser")
    data = []
    data.append([th.text for th in soup.select('thead th:not([colspan])')])
    for x in soup.select('tbody tr:not(:-soup-contains("ads"))'):
        data.append(list(x.stripped_strings))
    return data

def saveGDP(ulist):
    file_name = '21095010 胡碧玉 GDP.csv'
    print(ulist)
    # newline='' avoids blank rows between records on Windows
    with open(file_name, 'w', errors='ignore', encoding='utf-8', newline='') as f:
        f_csv = csv.writer(f)
        f_csv.writerows(ulist)

def main():
    url = 'https://www.kylc.com/stats/global/yearly_per_country/g_gdp/vnm.html'
    html = get_html(url)
    saveGDP(getGDP(html))

if __name__ == "__main__":
    main()
When I try to write the information to the CSV file, this error is thrown:
Traceback (most recent call last):
File "sizeer.py", line 68, in <module>
writer.writerow([name,color,price])
ValueError: I/O operation on closed file
import requests
import csv
from bs4 import BeautifulSoup

proxies = {
    "http":"http://195.189.60.97:3128",
    "http":"http://103.78.75.165:8080",
    "http":"http://212.87.220.2:3128",
    "http":"http://88.99.134.61:8080",
    "http":"http://103.102.139.178:8080",
    "http":"http://218.60.8.83:3129",
    "http":"http://124.121.105.193:8888",
    "http":"http://198.237.114.54:8080",
    "http":"http://36.67.106.58:8080",
    "http":"http://35.214.241.28:3128"
}

base_url = ...

page = requests.get(base_url, proxies=proxies)
if page.status_code != 200:
    exit("Page wasn't parsed")
soup = BeautifulSoup(page.content, 'lxml')

with open("result.csv", "w") as file:
    writer = csv.writer(file)
    writer.writerow(["Product","Color","Price"])

#Get categories
category_wrapper = soup.find_all(class_="m-menu_subItem")
categories = []
for cw in category_wrapper:
    anchor = cw.find("a", recursive=False)
    categories.append(anchor['href'])

#Iterrate categories
for category in categories:
    cat_page = requests.get(base_url + category, proxies=proxies)
    cat_soup = BeautifulSoup(cat_page.content, 'lxml')
    products_wrapper = cat_soup.find(class_="b-productList")
    cat_pagination = products_wrapper.find(class_="m-pagination").find_all("span")
    max_page = [int(s) for s in cat_pagination[-1].text.split() if s.isdigit()][0]

    #Iterrate category with pagination and get products
    for i in range(1, max_page+1):
        cat_pagination_page = requests.get(base_url+category+"/?sort=default&limit=60&page="+str(i), proxies=proxies)
        cat_pagination_page_soup = BeautifulSoup(cat_pagination_page.content, 'lxml')
        product_links = cat_pagination_page_soup.find_all(class_="b-itemList_photoLink")

        for link in product_links:
            #Get product data
            product_page = requests.get(base_url+link['href'], proxies=proxies)
            product_soup = BeautifulSoup(product_page.content, 'lxml')

            #Get product variations
            variations = product_soup.find_all(class_="m-productDescr_colorItem")

            #If there are variations
            if len(variations) > 0:
                for v in variations:
                    variation_page = requests.get(base_url+v['href'], proxies=proxies)
                    variation_soup = BeautifulSoup(variation_page.content, 'lxml')
                    price = variation_soup.find(class_="s-newPrice").text.strip().split(" ")[0]
                    name = variation_soup.find(class_="m-productDescr_headline").text.strip()
                    color = v['title']
                    print(name)
                    print(color)
                    print(price)
                    print("-------------")
                    #Save in csv
                    writer.writerow([name,color,price])

print("SCRAPING DONE")
How do I keep the file open through the whole script execution? Or do I have to open it every time I add content? EDIT: In fact, the file is not even created.
with open("result.csv", "w") as file:
writer = csv.writer(file)
writer.writerow(["Product","Color","Price"])
The file closes at the end of the with block - that is the block's purpose.
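Here is a minimal demonstration of the error from your traceback: writing through the csv.writer after the with block has exited raises the same ValueError.

import csv

with open("result.csv", "w", newline="") as file:
    writer = csv.writer(file)
    writer.writerow(["Product", "Color", "Price"])  # fine, the file is still open

writer.writerow(["Name", "Black", "59.99"])  # ValueError: I/O operation on closed file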
You could put everything inside the block, but that only makes the existing problem worse: the code already reaches several levels of indentation, is long, and is becoming difficult to understand. This is why you use functions to organize code. For example, put the big for loop in a function:
def do_stuff_with(categories, writer):
    for category in categories:
        # lots of logic here
        # use `writer.writerow` when needed

# Get everything else set up that doesn't need the file, first
categories = ... # do the BeautifulSoup input stuff

# then we can open the file and use the function:
with open("result.csv", "w") as file:
    writer = csv.writer(file)
    writer.writerow(["Product","Color","Price"])
    do_stuff_with(categories, writer)
Once you have that working, you can probably think of ways to apply the technique further. For example, pull out the innermost logic for handling the variations of a single product, as sketched below. Or you can have a function that handles the creation of the categories data and returns it.
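As a sketch of that innermost extraction, using the class names from your code (the function name write_variations is made up for illustration, and requests/BeautifulSoup are imported as in your script):

def write_variations(product_soup, base_url, proxies, writer):
    # Handle all colour variations of a single product page.
    for v in product_soup.find_all(class_="m-productDescr_colorItem"):
        variation_page = requests.get(base_url + v['href'], proxies=proxies)
        variation_soup = BeautifulSoup(variation_page.content, 'lxml')
        price = variation_soup.find(class_="s-newPrice").text.strip().split(" ")[0]
        name = variation_soup.find(class_="m-productDescr_headline").text.strip()
        writer.writerow([name, v['title'], price])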
In this part of my scraping code I fetch a lot of URLs that are stored in a url.xml file, and it takes very long to finish. How do I implement a multiprocessing pool here?
Is there any simple code to fix this problem? Thanks
from bs4 import BeautifulSoup as soup
import requests
from multiprocessing import Pool

p = Pool(10)  # "10" means that 10 URLs will be processed at the same time
p.map

page_url = "url.xml"
out_filename = "prices.csv"
headers = "availableOffers,otherpricess,currentprice \n"

with open(out_filename, "w") as fw:
    fw.write(headers)
    with open("url.xml", "r") as fr:
        for url in map(lambda x: x.strip(), fr.readlines()):
            print(url)
            response = requests.get(url)
            page_soup = soup(response.text, "html.parser")
            availableOffers = page_soup.find("input", {"id": "availableOffers"})
            otherpricess = page_soup.find("span", {"class": "price"})
            currentprice = page_soup.find("div", {"class": "is"})
            fw.write(availableOffers + ", " + otherpricess + ", " + currentprice + "\n")

p.terminate()
p.join()
You can use the concurrent.futures standard package in Python for multiprocessing and multi-threading.
In your case you don't need multiprocessing; multi-threading will help, because the function is network-bound rather than computationally expensive.
By using multi-threading you can send multiple requests at the same time. The number_of_threads argument controls how many requests you want to send at a time.
I have created a function, extract_data_from_url_func, that extracts the data from a single URL, and then I pass this function and the list of URLs to the multi-threading executor using concurrent.futures.
from bs4 import BeautifulSoup as soup
from concurrent.futures import ThreadPoolExecutor
import requests

page_url = "url.xml"
number_of_threads = 6
out_filename = "prices.csv"
headers = "availableOffers,otherpricess,currentprice \n"

def extract_data_from_url_func(url):
    print(url)
    response = requests.get(url)
    page_soup = soup(response.text, "html.parser")
    availableOffers = page_soup.find("input", {"id": "availableOffers"})["value"]
    otherpricess = page_soup.find("span", {"class": "price"}).text.replace("$", "")
    currentprice = page_soup.find("div", {"class": "is"}).text.strip().replace("$", "")
    output_list = [availableOffers, otherpricess, currentprice]
    output = ",".join(output_list)
    print(output)
    return output

with open("url.xml", "r") as fr:
    URLS = list(map(lambda x: x.strip(), fr.readlines()))

with ThreadPoolExecutor(max_workers=number_of_threads) as executor:
    results = executor.map(extract_data_from_url_func, URLS)

responses = []
for result in results:
    responses.append(result)

with open(out_filename, "w") as fw:
    fw.write(headers)
    for response in responses:
        fw.write(response + "\n")  # add a newline so each URL gets its own row
reference: https://docs.python.org/3/library/concurrent.futures.html
It should be something of this form. Please make changes so that the urls being passed to p.map is a list of URLs:
from bs4 import BeautifulSoup as soup
import requests
from multiprocessing import Pool
import csv

def parse(url):
    response = requests.get(url)
    page_soup = soup(response.text, "html.parser")
    availableOffers = page_soup.find("input", {"id": "availableOffers"})["value"]
    otherpricess = page_soup.find("span", {"class": "price"}).text.replace("$", "")
    currentprice = page_soup.find("div", {"class": "is"}).text.strip().replace("$", "")
    return availableOffers, otherpricess, currentprice

if __name__ == '__main__':
    urls = [ ... ]  # List of urls to fetch from
    p = Pool(10)  # "10" means that 10 URLs will be processed at the same time
    records = p.map(parse, urls)
    p.terminate()
    p.join()

    with open("outfile.csv", "w") as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        for r in records:
            writer.writerow(r)
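Since the work here is dominated by network I/O, multiprocessing.pool.ThreadPool is a drop-in alternative with the same map interface and lower start-up cost. A sketch reusing the parse function above:

from multiprocessing.pool import ThreadPool

if __name__ == '__main__':
    urls = [ ... ]  # same list of URLs as above
    with ThreadPool(10) as p:  # 10 worker threads instead of 10 processes
        records = p.map(parse, urls)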
I have thousands of URLs in a text file, and I want to extract the title and price from each product link. I tried to implement threads to do it faster, but it seems it is not working correctly: it produces duplicate data and the script runs far too long. Without threads, the script works as expected.
Here is my code:
import requests
from bs4 import BeautifulSoup
import csv
import lxml
import threading

def runner(fname):
    global lck
    lck.acquire()
    with open(fname, 'r') as f:
        for line in f:
            r = requests.get(line)
            soup = BeautifulSoup(r.content, 'lxml')
            try:
                title = soup.find('h1', id='itemTitle').text.trim().encode('utf-8')
                price = soup.find('span', itemprop='price').text.trim().encode('utf-8')
            except:
                price = "No price"
            with open("Data.csv", 'a', newline='',) as file:
                writer = csv.writer(file)
                writer.writerow([title, price])
    lck.release()

lck = threading.Lock()
fname = "ProductLinks.txt"

threads = []
for i in range(0, 3):
    t = threading.Thread(target = runner, args = (fname, ))
    threads.append(t)
    t.start()

for t in threads:
    t.join()
Can someone please guide me on how to do this correctly, so that it extracts and saves the data in parallel?
It is producing duplicate results because when you create the threads you call the same function three times.
t = threading.Thread(target = runner, args = (fname, ))
When you execute the above line, the argument always stays fname, which as far as I understand is always "ProductLinks.txt". Therefore every thread goes into runner and loops over all the lines of the text file.
I suspect that what you want to parallelise is exactly that loop over the text lines. Then you would need to write a function parse_line and pass that one into the threading environment.
I would also suggest that you store the values in a dict and export to csv at the end, because I am not sure whether appending to the file from several threads is safe.
def parse_line(line, result_dict):
    r = requests.get(line)
    soup = BeautifulSoup(r.content, 'lxml')
    try:
        # Python strings have no .trim(); .strip() is the equivalent
        title = soup.find('h1', id='itemTitle').text.strip().encode('utf-8')
        price = soup.find('span', itemprop='price').text.strip().encode('utf-8')
        result_dict[title] = price
    except:
        result_dict['No title'] = "No price"
Now, say that you have a list with all the lines in your file as strings. You can achieve that by doing the following
file_lines = []
with open(fname, 'r') as f:
    for line in f:
        file_lines.append(line)
Then you can call this function using Threading over the list of all lines in your file
my_dict = {}
threads = []
for input_line in file_lines:
    t = threading.Thread(target = parse_line, args = (input_line, my_dict))
    threads.append(t)
    t.start()

# wait for every thread to finish before exporting the results
for t in threads:
    t.join()
Finally you can export your dict to csv using pandas
import pandas as pd
# build the frame from (title, price) pairs; a dict of plain scalars cannot be passed directly
pd.DataFrame(list(my_dict.items()), columns=['Title', 'Price']).to_csv("Data.csv", index=False)
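Note that this starts one thread per line, which gets unwieldy with thousands of URLs. A sketch of a capped alternative using concurrent.futures (parse_line and file_lines are the names defined above; the max_workers value is an arbitrary choice):

from concurrent.futures import ThreadPoolExecutor

my_dict = {}
with ThreadPoolExecutor(max_workers=8) as executor:
    # submit each line; the executor reuses a fixed pool of 8 threads
    futures = [executor.submit(parse_line, line, my_dict) for line in file_lines]
# the with-block waits for all futures to finish before the export runs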
I want to scrape the title from each given URL in multiple threads (for example, in 5 threads) and save them to one text file. How do I do that, and how do I make sure that I safely save the output to one file?
This is my code:
import csv
import requests
requests.packages.urllib3.disable_warnings()

urls = []
with open('Input.csv') as csvDataFile:
    csvReader = csv.reader(csvDataFile)
    for row in csvReader:
        urls.append(row[1])

def find_between( s, first, last ):
    try:
        start = s.index( first ) + len( first )
        end = s.index( last, start )
        return s[start:end]
    except ValueError:
        return ""

def get_title( url ):
    try:
        r = requests.get(url)
        html_content = r.text.encode('UTF-8')
        title = find_between(html_content , "<title>", "</title>")
        return title
    except:
        return ""

for url in urls:
    f = open('myfile.txt', 'a')
    f.write(get_title(url) + '\n')
    f.close()
Try to use concurrent.futures:
1. create a pool
2. submit the function and parameters
3. get the result from the function
import csv
from concurrent import futures

pool = futures.ThreadPoolExecutor(5)
workers = [pool.submit(get_title, url) for url in urls]

# block until every submitted task has finished
futures.wait(workers)

with open('myfile.txt', 'w', newline='') as f:
    w = csv.writer(f)
    w.writerows([[worker.result()] for worker in workers])
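If you prefer the titles written in the same order as the input URLs with less bookkeeping, executor.map is an alternative sketch (it assumes get_title and urls from the question):

from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=5) as pool:
    titles = list(pool.map(get_title, urls))  # results come back in input order

with open('myfile.txt', 'w') as f:
    f.write('\n'.join(titles) + '\n')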