Appending to array using pool - python

I am trying to scrape data from soccerway.com, checking whether each page is a completed game or a game still to be played, and writing each kind to a separate csv file. I am running through 10,000 pages, so I have written it using Pools. However, the lists I append to come back empty and I cannot write anything to the csv files.
I tried writing straight to the file instead of appending to a list, but this gave incomplete files.
import requests
from bs4 import BeautifulSoup
import time
import numpy as np
import uuid
from multiprocessing import Pool
import sys, os

fixturesA = []
linksA = []
statsA = []

def parse(url):
    try:
        #print(url)
        delays = [0.25, 0.5, 0.75, 1]
        delay = np.random.choice(delays)
        #time.sleep(delay)
        #r = requests.get(url)
        r = requests.get(url, timeout=10)
        soup = BeautifulSoup(r.content, "html.parser")
        teams = soup.findAll('h3', attrs={'class': 'thick'})
        homeTeam = teams[0].text.strip()
        awayTeam = teams[2].text.strip()
        middle = teams[1].text.strip()
        dds = soup.findAll('dd')
        date = dds[1].text.strip()
        gameWeek = dds[2].text.strip()
        if ':' not in middle:
            middle = middle.split(" - ")
            homeGoals = 0
            awayGoals = 0
            homeGoals = middle[0]
            try:
                awayGoals = middle[1]
            except Exception as e:
                homeGoals = "-1"
                awayGoals = "-1"
            matchGoals = int(homeGoals) + int(awayGoals)
            if(matchGoals >= 0):
                if(int(homeGoals) > 0 and int(awayGoals) > 0):
                    btts = "y"
                else:
                    btts = "n"
                halfTimeScore = dds[4].text.strip().split(" - ")
                firstHalfHomeGoals = halfTimeScore[0]
                firstHalfAwayConc = halfTimeScore[0]
                firstHalfAwayGoals = halfTimeScore[1]
                firstHalfHomeConc = halfTimeScore[1]
                firstHalfTotalGoals = int(firstHalfHomeGoals) + int(firstHalfAwayGoals)
                secondHalfHomeGoals = int(homeGoals) - int(firstHalfHomeGoals)
                secondHalfAwayConc = int(homeGoals) - int(firstHalfHomeGoals)
                secondHalfAwayGoals = int(awayGoals) - int(firstHalfAwayGoals)
                secondHalfHomeConc = int(awayGoals) - int(firstHalfAwayGoals)
                secondHalfTotalGoals = matchGoals - firstHalfTotalGoals
                homeTeamContainers = soup.findAll('div', attrs={'class': 'container left'})
                homeTeamStarting = homeTeamContainers[2]
                homeTeamBench = homeTeamContainers[3]
                homeTeamYellows = len(homeTeamStarting.findAll('img', attrs={'src': 'https://s1.swimg.net/gsmf/700/img/events/YC.png'})) + len(homeTeamBench.findAll('img', attrs={'src': 'https://s1.swimg.net/gsmf/699/img/events/YC.png'}))
                homeTeamReds = len(homeTeamStarting.findAll('img', attrs={'src': 'https://s1.swimg.net/gsmf/700/img/events/RC.png'})) + len(homeTeamBench.findAll('img', attrs={'src': 'https://s1.swimg.net/gsmf/699/img/events/RC.png'}))
                homeTeamCards = homeTeamYellows + homeTeamReds
                awayTeamContainers = soup.findAll('div', attrs={'class': 'container right'})
                awayTeamStarting = awayTeamContainers[2]
                awayTeamBench = awayTeamContainers[3]
                awayTeamYellows = len(awayTeamStarting.findAll('img', attrs={'src': 'https://s1.swimg.net/gsmf/700/img/events/YC.png'})) + len(awayTeamBench.findAll('img', attrs={'src': 'https://s1.swimg.net/gsmf/699/img/events/YC.png'}))
                awayTeamReds = len(awayTeamStarting.findAll('img', attrs={'src': 'https://s1.swimg.net/gsmf/700/img/events/RC.png'})) + len(awayTeamBench.findAll('img', attrs={'src': 'https://s1.swimg.net/gsmf/699/img/events/RC.png'}))
                awayTeamCards = awayTeamYellows + awayTeamReds
                matchCards = homeTeamCards + awayTeamCards
                try:
                    iframe = soup.findAll('iframe')
                    iframeSrc = iframe[1]['src']
                    url = 'https://us.soccerway.com/' + iframeSrc
                    c = requests.get(url, timeout=10)
                    soupC = BeautifulSoup(c.content, "html.parser")
                    cornerContainer = soupC.findAll('td', attrs={'class': 'legend left value'})
                    homeCorners = cornerContainer[0].text.strip()
                    awayCornersConc = homeCorners
                    cornerContainer = soupC.findAll('td', attrs={'class': 'legend right value'})
                    awayCorners = cornerContainer[0].text.strip()
                    homeCornersConc = awayCorners
                    matchCorners = int(homeCorners) + int(awayCorners)
                    print("Got Score . " + homeTeam + " vs " + awayTeam + " . " + gameWeek)
                    statsA.append(homeTeam + "," + awayTeam + "," + gameWeek + "," + homeGoals + "," + awayGoals + "," + str(matchGoals) + "," + btts + "," + firstHalfHomeGoals + "," + firstHalfHomeConc + "," + firstHalfAwayGoals + "," + firstHalfAwayConc + "," + str(firstHalfTotalGoals) + "," + str(secondHalfHomeGoals) + "," + str(secondHalfHomeConc) + "," + str(secondHalfAwayGoals) + "," + str(secondHalfAwayConc) + "," + str(secondHalfTotalGoals) + "," + str(homeTeamCards) + "," + str(awayTeamCards) + "," + str(matchCards) + "," + homeCorners + "," + awayCorners + "," + homeCornersConc + "," + awayCornersConc + "," + str(matchCorners) + "," + dds[0].text.strip() + "\n")
                    return None
                except Exception as e:
                    print("Got Score no corners. " + homeTeam + " vs " + awayTeam + " . " + gameWeek + " NO FRAME")
                    statsA.append(homeTeam + "," + awayTeam + "," + gameWeek + "," + homeGoals + "," + awayGoals + "," + str(matchGoals) + "," + btts + "," + firstHalfHomeGoals + "," + firstHalfHomeConc + "," + firstHalfAwayGoals + "," + firstHalfAwayConc + "," + str(firstHalfTotalGoals) + "," + str(secondHalfHomeGoals) + "," + str(secondHalfHomeConc) + "," + str(secondHalfAwayGoals) + "," + str(secondHalfAwayConc) + "," + str(secondHalfTotalGoals) + "," + str(homeTeamCards) + "," + str(awayTeamCards) + "," + str(matchCards) + "," + "" + "," + "" + "," + "" + "," + "" + "," + "" + "," + dds[0].text.strip() + "\n")
                    return None
        else:
            fixturesA.append(homeTeam + "," + awayTeam + "," + gameWeek + "," + date + "\n")
            linksA.append(url + "\n")
            print(homeTeam + " vs " + awayTeam + " at " + middle + " GW:" + gameWeek)
            return None
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
        linksA.append(url + "\n")
        print(url)
        return None

stats = open('Statsv2.csv', 'a', encoding='utf-8')
fixtures = open('fixturesv2.csv', 'w', encoding='utf-8')

with open('links.txt') as f:
    content = f.readlines()
content = [x.strip() for x in content]

links = open('links.txt', 'w')

if __name__ == '__main__':
    start_time = time.time()
    p = Pool(20) # Pool tells how many at a time
    records = p.map(parse, content)
    p.terminate()
    p.join()
    print("--- %s seconds ---" % (time.time() - start_time))

I assume you are running Windows? Then the answer is that multiprocessing on Windows creates copies (spawned processes) instead of forks. So you have your main process with the lists, and each worker process (from the pool) gets its own separate set of lists.
The workers most likely fill their own lists correctly, but the lists in the main process never receive any data and so stay empty, and the workers don't return anything. Since you write your files in the main process, you get empty files.
An easy way to solve this is to create pipes or queues between the main process and the workers to allow communication between the processes. You could also use shared arrays as provided by the multiprocessing module, but then you would need to know the length at creation time.
See the documentation: Multiprocessing
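
For example, a minimal sketch of the queue idea, assuming parse() is changed to take the queue as its first argument and content is the list of links from the question:

from multiprocessing import Pool, Manager
from functools import partial

def parse(queue, url):
    # ... scrape as before, then instead of statsA.append(row):
    queue.put(row)  # row is the assembled csv line

if __name__ == '__main__':
    manager = Manager()
    queue = manager.Queue()  # a Manager queue can be shared with pool workers
    with Pool(20) as p:
        p.map(partial(parse, queue), content)
    with open('Statsv2.csv', 'a', encoding='utf-8') as stats:
        while not queue.empty():  # drain the results in the main process
            stats.write(queue.get())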

As pointed out by @RaJa, you're not actually doing anything the parent/controlling process can see. The easiest fix is to return values from the mapped function.
For example, parse() could return a tuple at the end:

def parse(url):
    # do work
    return url, homeTeam, awayTeam, gameWeek, homeGoals, awayGoals  # ...

Then the parent process can receive the values and do useful things like saving them to a CSV file:

import csv

with Pool(20) as pool:
    records = pool.map(parse, content)

with open('stats.csv', 'w', newline='') as fd:
    out = csv.writer(fd)
    out.writerow([
        'url', 'hometeam', 'awayteam',
        # and the remaining column names for the header
    ])
    out.writerows(records)
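
Note that parse() in the question returns None whenever it hits an exception; if you keep that behaviour, drop the failed rows before writing them:

records = [r for r in records if r is not None]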

Related

Unindent does not match while it seems absolutely correct

My code throws an error and I really can't understand why. Here it is:
  File "alon.py", line 152
    fig.write_image("files/table_" + product_name + ".pdf")
                                                           ^
IndentationError: unindent does not match any outer indentation level
If I remove this line, it works. I can't see how it is unindented: it is under the if type(product_data) is dict: statement, at the same level as the last line of code before it. What can cause such behaviour?
import MySQLdb
from plotly import graph_objs as go
import numpy as np
import os
from plotly.subplots import make_subplots
from PyPDF2 import PdfFileMerger
from datetime import datetime, timedelta

# Database connect
db = MySQLdb.connect(host="localhost",
                     user="root",
                     passwd="abc9110033969",
                     db="alon")

today = datetime.today().strftime('%Y-%m-%d')
one_week = (datetime.today() - timedelta(days=7)).strftime('%Y-%m-%d')
two_week = (datetime.today() - timedelta(days=14)).strftime('%Y-%m-%d')
three_week = (datetime.today() - timedelta(days=21)).strftime('%Y-%m-%d')
four_week = (datetime.today() - timedelta(days=28)).strftime('%Y-%m-%d')

# Functions
def load_post_views(table, today, one_week, two_week, three_week, four_week):
    product_views_dict = dict()
    cursor = db.cursor()
    cursor.execute(
        "SELECT client_id, product_id, referrer, `date`" +
        " FROM " + table +
        " WHERE `date`>='" + four_week + "'")
    social_dict = {
        "facebook": 0,
        "twitter": 0,
        "instagram": 0,
        "linkedin": 0,
        "pinterest": 0,
        "website": 0,
    }
    for x in range(0, cursor.rowcount):
        row = cursor.fetchone()
        network = ""
        period = ""
        client_id = row[0]
        product_id = row[1]
        referrer = row[2]
        date = str(row[3])
        email_cursor = db.cursor()
        email_cursor.execute("SELECT address FROM c8ty_connections_email WHERE entry_id=" + str(client_id))
        email = email_cursor.fetchone()
        product_cursor = db.cursor()
        product_cursor.execute("SELECT post_title FROM c8ty_posts WHERE id=" + str(product_id))
        product_name = product_cursor.fetchone()
        # Add client ID key
        if client_id not in product_views_dict:
            product_views_dict[client_id] = dict()
        # Add product ID key to client ID parent key
        if product_id not in product_views_dict[client_id]:
            product_views_dict[client_id][product_id] = {
                today + " - " + one_week: social_dict,
                one_week + " - " + two_week: social_dict,
                two_week + " - " + three_week: social_dict,
                three_week + " - " + four_week: social_dict
            }
        # Find referrer
        if "facebook" in referrer:
            network = "facebook"
        elif "twitter" in referrer:
            network = "twitter"
        elif "instagram" in referrer:
            network = "instagram"
        elif "linkedin" in referrer:
            network = "linkedin"
        elif "pinterest" in referrer:
            network = "pinterest"
        else:
            network = "website"
        # Check view period
        if date <= today and date > one_week:
            period = today + " - " + one_week
        if date <= one_week and date > two_week:
            period = one_week + " - " + two_week
        if date <= two_week and date > three_week:
            period = two_week + " - " + three_week
        if date <= three_week and date > four_week:
            period = three_week + " - " + four_week
        product_views_dict[client_id][product_id][period][network] += 1
        product_views_dict[client_id]["email"] = email[0]
        product_views_dict[client_id][product_id]["product"] = product_name[0]
    return product_views_dict

# Init
product_views_dict = load_post_views("an_product_view", today, one_week, two_week, three_week, four_week)
brochure_views_dict = load_post_views("an_brochure_view", today, one_week, two_week, three_week, four_week)

for clinetID, product_info in product_views_dict.items():
    client_email = product_info["email"]
    for productID, product_data in product_info.items():
        if type(product_data) is dict:
            product_name = product_data['product']
            table_data = [
                [
                    today + " - " + one_week,
                    one_week + " - " + two_week,
                    two_week + " - " + three_week,
                    three_week + " - " + four_week,
                    today + " - " + four_week
                ]
            ]
            for network in ["website", "facebook", "twitter", "instagram", "linkedin", "pinterest"]:
                table_data.append([
                    product_data[today + " - " + one_week][network],
                    product_data[one_week + " - " + two_week][network],
                    product_data[two_week + " - " + three_week][network],
                    product_data[three_week + " - " + four_week][network],
                    sum([
                        int(product_data[today + " - " + one_week][network]),
                        int(product_data[one_week + " - " + two_week][network]),
                        int(product_data[two_week + " - " + three_week][network]),
                        int(product_data[three_week + " - " + four_week][network])
                    ])
                ])
            fig = make_subplots(rows=5, cols=2)
            # Create product table
            fig.add_trace(
                go.Table(
                    header=dict(values=["Period", "Website", "Facebook", "Twitter", "Instagram", "LinkedIn", "Pinterest", "Total"]),
                    cells=dict(values=table_data)
                )
            )
            # Create folder if it doesn't exist
            if not os.path.exists("files"):
                os.mkdir("files")
            # Write pdf
            fig.write_image("files/table_" + product_name + ".pdf")

db.close()
exit()
Check that you aren't mixing spaces and tabs in your source file. Indentation should be consistent: one or the other, never both. I recommend spaces, to comply with PEP 8.
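A quick way to find the offending lines (a throwaway sketch; it assumes the script is saved as alon.py) is to print every line whose leading whitespace mixes both characters:

# flag lines of alon.py whose indentation mixes tabs and spaces
with open("alon.py") as f:
    for lineno, line in enumerate(f, 1):
        indent = line[:len(line) - len(line.lstrip())]
        if " " in indent and "\t" in indent:
            print(lineno, repr(indent))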

How to fix a loop that stops in the following code

I'm working on a parser. The loop breaks after an exception. I need your help.
import requests

def requestBarter():
    response = requests.get(api_url)  # the actual API URL was elided in the post
    return response.json()

def responsePrint(id, json_data):
    title = json_data[id]["title"]
    tradable = json_data[id]["tradable"]
    wishlist = json_data[id]["wishlist"]
    library = json_data[id]["library"]
    bundles = json_data[id]["bundles"]
    cards = json_data[id]["cards"]
    userreviews = json_data[id]["userreviews"]
    print("ID: " + id + " | Title: " + title + " | Tradable: " + str(tradable) + " | Wishlist: " + str(
        wishlist) + " | Library: " + str(
        library) + " | Bundles: " + str(bundles) + " | Cards: " + str(cards) + " | Userreviews: " + str(userreviews))

def responsePrintOnException(id, json_data):
    title = json_data[id]["title"]
    tradable = json_data[id]["tradable"]
    wishlist = json_data[id]["wishlist"]
    library = json_data[id]["library"]
    bundles = json_data[id]["bundles"]
    cards = json_data[id]["cards"]
    print("ID: " + id + " | Title: " + title + " | Tradable: " + str(tradable) + " | Wishlist: " + str(
        wishlist) + " | Library: " + str(
        library) + " | Bundles: " + str(bundles) + " | Cards: " + str(cards))

def getAll():
    try:
        json_data = requestBarter()
        for id in json_data:
            responsePrint(id, json_data)
    except KeyError:
        responsePrintOnException(id, json_data)
        pass

if __name__ == '__main__':
    getAll()
After a KeyError the code drops out of the loop, so I need help with the code above.
I expected over 90,000 lines of output; the actual output is 30 lines.
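
The try/except wraps the whole for loop, so the first KeyError aborts all remaining iterations. A minimal sketch of the usual fix, with the try/except moved inside the loop so one bad record doesn't end the run:

def getAll():
    json_data = requestBarter()
    for id in json_data:
        try:
            responsePrint(id, json_data)
        except KeyError:
            # fall back to the short printout when a field (e.g. "userreviews") is missing
            responsePrintOnException(id, json_data)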

Python script write to file stopping after certain point

I'm trying to analyze a sqlite3 file and print the results to a text file. If I test the code with print, it all works fine; when I write it to a file, it cuts out at the same point every time.
import sqlite3
import datetime
import time

conn = sqlite3.connect("History.sqlite")
curs = conn.cursor()
results = curs.execute("SELECT visits.id, visits.visit_time, urls.url, urls.visit_count \
                        FROM visits INNER JOIN urls ON urls.id = visits.url \
                        ORDER BY visits.id;")
exportfile = open('chrome_report.txt', 'w')
for row in results:
    timestamp = row[1]
    epoch_start = datetime.datetime(1601, 1, 1)
    delta = datetime.timedelta(microseconds=int(timestamp))
    fulltime = epoch_start + delta
    string = str(fulltime)
    timeprint = string[:19]
    exportfile.write("ID: " + str(row[0]) + "\t")
    exportfile.write("visit time: " + str(timeprint) + "\t")
    exportfile.write("Url: " + str(row[2]) + "\t")
    exportfile.write("Visit count: " + str(row[3]))
    exportfile.write("\n")
    print "ID: " + str(row[0]) + "\t"
    print "visit time: " + str(timeprint) + "\t"
    print "Url: " + str(row[2]) + "\t"
    print "Visit count: " + str(row[3])
    print "\n"
conn.close()
So the print statements give the proper results, but the export to the file stops in the middle of a URL.
OK, I would start by replacing the for loop with the one below:

with open('chrome_report.txt', 'w') as exportfile:
    for row in results:
        try:
            timestamp = row[1]
            epoch_start = datetime.datetime(1601, 1, 1)
            delta = datetime.timedelta(microseconds=int(timestamp))
            fulltime = epoch_start + delta
            string = str(fulltime)
            timeprint = string[:19]
            exportfile.write("ID: " + str(row[0]) + "\t")
            exportfile.write("visit time: " + str(timeprint) + "\t")
            exportfile.write("Url: " + str(row[2]) + "\t")
            exportfile.write("Visit count: " + str(row[3]))
            exportfile.write("\n")
            print "ID: " + str(row[0]) + "\t"
            print "visit time: " + str(timeprint) + "\t"
            print "Url: " + str(row[2]) + "\t"
            print "Visit count: " + str(row[3])
            print "\n"
        except Exception as err:
            print(err)
By using the "with" statement (context manager) we eliminate the need to close the file: the file is closed, and its buffer flushed to disk, when the block exits. That is very likely why your output was cut off mid-URL: the original code never closes exportfile, so whatever was still sitting in the write buffer when the script ended never reached the disk. The try/except captures any per-row error and prints it, which will show you where your code is failing and why.
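
If you would rather keep the explicit open(), the equivalent fix is a single call after the loop, since the cut-off output is just the unflushed tail of the write buffer:

exportfile.close()  # flushes the buffered remainder of the report to disk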

How to get new line

How can I print a new line in the output file? When I try to add the new line with "/n" it just prints /n literally.
This is what I have so far.
inputFile = open("demofile1.txt", "r")
outFile = open("Ji...", "w")  # the rest of this line was cut off in the original post
for line in inputFile:
    string = line.split(',')
    go = (string)[3::]
    bo = [float(i) for i in go]
    total = sum(bo)
    pine = ("%8.2f" % total)
    name = string[2] + "," + " " + string[1]
    kale = (string[0] + " " + name + " " + "/n")
    str1 = ''.join(kale)
    str2 = ''.join(pine)
    outFile.write(str1 + " " + str2 + " ")
Current Result
8
53 Baul
A999999
You need to use \n, not /n. So this line:
kale = (string[0] + " " + name + " " + "/n")
Should be:
kale = (string[0] + " " + name + " " + "\n")
Also, please do consider using a str formatter, so all these lines:
go =(string)[3::]
bo = [float(i) for i in go]
total = sum(bo)
pine = ("%8.2f"%total)
name = string[2] + "," + " " + string[1]
kale = (string[0] + " " + name + " " + "/n")
str1 = ''.join(kale)
str2 = ''.join(pine)
outFile.write(str1 + " " + str2 + " ")
Will become:
outFile.write("{} {} {:8.2f}\n".format(string[0], string[2] + ", " + string[1], sum(bo))

Python - key error when using "if in dict"

I am receiving the following error when running a script to parse contents of an XML file.
if iteration.findtext("Iteration_query-def") in ecdict:
KeyError: 'XLOC_000434'
I was under the impression that using "if in dict" meant that if the key is not found in the dictionary, the script would skip the if block and proceed with the rest of the code. Below is the problematic section of the code I am using. I realise this is quite a basic question, but I am unsure what else I can say, and I don't understand why I am receiving this error.
import xml.etree.ElementTree as ET

tree = ET.parse('507.FINAL_14.2.14_2_nr.out_fmt5.out')
blast_iteration = tree.find("BlastOutput_iterations")
for iteration in blast_iteration.findall("Iteration"):
    query = iteration.findtext("Iteration_query-def").strip().strip("\n")
    if query in score:
        continue
    if iteration.findtext("Iteration_message") == "No hits found":
        if iteration.findtext("Iteration_query-def") in tair:
            tairid = tair[iteration.findtext("Iteration_query-def")][0]
            tairdes = tair[iteration.findtext("Iteration_query-def")][1]
        else:
            tairid = "-"
            tairdes = "-"
        goterms = ""
        ecterms = ""
        if iteration.findtext("Iteration_query-def") in godict:
            for x in godict[iteration.findtext("Iteration_query-def")][:-1]:
                goterms = goterms + x + ";"
            goterms = goterms + godict[iteration.findtext("Iteration_query-def")][-1]
        else:
            goterms = "-"
        if iteration.findtext("Iteration_query-def") in ecdict:
            for x in ecdict[iteration.findtext("Iteration_query-def")][:-1]:
                ecterms = ecterms + x + ";"
            ecterms = ecterms + ecdict[iteration.findtext("Iteration_query-def")][-1]
        else:
            ecterms = "-"
        if iteration.findtext("Iteration_query-def") in godescr:
            desc = godescr[iteration.findtext("Iteration_query-def")]
        else:
            desc = "-"
        n += 1
        p = "PvOAK_up"+str(n) + "\t" + tranlen[iteration.findtext("Iteration_query-def")] + "\t" + orflen[iteration.findtext("Iteration_query-def")] + "\t" + "-" + "\t" + "-" + "\t" + tairid + "\t" + tairdes + "\t" + goterms + "\t" + ecterms + "\t" + desc + "\t" + str(flower[query][2]) + "\t" + str('{0:.2e}'.format(float(flower[query][1]))) + "\t" + str('{0:.2f}'.format(float(flower[query][0]))) + "\t" + str('{0:.2f}'.format(float(leaf[query][2]))) + "\t" + str('{0:.2f}'.format(float(leaf[query][1]))) + "\t" + str('{0:.2f}'.format(float(leaf[query][0])))
        print p
Hope you can help,
Thanks.
edit: I should say that godict and ecdict were previously created as follows - I can submit the entire code if needs be:
godict = {}
ecdict = {}
godescr = {}
f = open("507.FINAL_14.2.14_2_nr.out_fmt5.out.annot")
for line in f:
    line = line.split("\t")
    if len(line) > 2:
        godescr[line[0]] = line[2]
    line[1] = line[1].strip("\n")
    if line[1].startswith("EC"):
        if line[0] in ecdict:
            a = ecdict[line[0]]
            a.append(line[1])
            ecdict[line[0]] = a
        else:
            ecdict[line[0]] = [line[1]]
    else:
        if line[0] in godict:
            a = godict[line[0]]
            a.append(line[1])
            godict[line[0]] = a
        else:
            godict[line[0]] = [line[1]]
Traceback:
Traceback (most recent call last):
  File "2d.test.py", line 170, in <module>
    p = "PvOAK_up"+str(n) + "\t" + tranlen[iteration.findtext("Iteration_query-def")] + "\t" + orflen[iteration.findtext("Iteration_query-def")] + "\t" + "-" + "\t" + "-" + "\t" + tairid + "\t" + tairdes + "\t" + goterms + "\t" + ecterms + "\t" + desc + "\t" + str(flower[query][2]) + "\t" + str('{0:.2e}'.format(float(flower[query][1]))) + "\t" + str('{0:.2f}'.format(float(flower[query][0]))) + "\t" + str('{0:.2f}'.format(float(leaf[query][2]))) + "\t" + str('{0:.2f}'.format(float(leaf[query][1]))) + "\t" + str('{0:.2f}'.format(float(leaf[query][0])))
KeyError: 'XLOC_000434'
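
Note that the traceback points at the long line that builds p, not at the guarded if ... in ecdict: test. The lookups tranlen[...], orflen[...], flower[query] and leaf[query] on that line are plain [] indexing with no membership check, so any of them can raise KeyError: 'XLOC_000434'. A minimal sketch of guarding them the same way the other dicts are guarded (the "-" defaults are an assumption):

key = iteration.findtext("Iteration_query-def")
tran = tranlen.get(key, "-")  # .get() returns the default instead of raising KeyError
orf = orflen.get(key, "-")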
