Converting a serial task to parallel to map inputs and outputs - python

I have tens of thousands of simulations to run on a system with several cores. Currently it runs in serial: I know my input parameters, and I store my results in a dict.
Serial version
import time
import random

class MyModel(object):
    input = None
    output = None

    def run(self):
        time.sleep(random.random())  # simulate a complex task
        self.output = self.input * 10

# Run serial tasks and store results for each parameter
parameters = range(10)
results = {}
for p in parameters:
    m = MyModel()
    m.input = p
    m.run()
    results[p] = m.output
print('results: ' + str(results))
This takes under 10 seconds and displays the correct results:
results: {0: 0, 1: 10, 2: 20, 3: 30, 4: 40, 5: 50, 6: 60, 7: 70, 8: 80, 9: 90}
Parallel version
My attempts to parallelize this procedure are based on the example in the multiprocessing module documentation, near the text "An example showing how to use queues to feed tasks to a collection of worker processes and collect the results" (sorry, no URL anchor available).
The following builds on the top half of the serial version:
from multiprocessing import Process, Queue

NUMBER_OF_PROCESSES = 4

def worker(input, output):
    for args in iter(input.get, 'STOP'):
        m = MyModel()
        m.input = args[0]
        m.run()
        output.put(m.output)

# Run parallel tasks and store results for each parameter
parameters = range(10)
results = {}

# Create queues
task_queue = Queue()
done_queue = Queue()

# Submit tasks
tasks = [(t,) for t in parameters]
for task in tasks:
    task_queue.put(task)

# Start worker processes
for i in range(NUMBER_OF_PROCESSES):
    Process(target=worker, args=(task_queue, done_queue)).start()

# Get unordered results
for i in range(len(tasks)):
    results[i] = done_queue.get()

# Tell child processes to stop
for i in range(NUMBER_OF_PROCESSES):
    task_queue.put('STOP')

print('results: ' + str(results))
It takes only a few seconds now, but the mapping between inputs and results is mixed up:
results: {0: 10, 1: 0, 2: 60, 3: 40, 4: 20, 5: 80, 6: 30, 7: 90, 8: 70, 9: 50}
I realise that I'm populating the results based on an unordered done_queue.get(), but I'm not sure how to get the correct mapping to task_queue. Any ideas? Any other way to make this somehow cleaner?

A-ha! The worker needs to put some kind of ID on the output queue, such as the input parameter(s) used, so that each returned result can be matched back to its task. Here are the required modifications:
def worker(input, output):
    for args in iter(input.get, 'STOP'):
        m = MyModel()
        m.input = args[0]
        m.run()
        # Return a tuple of an ID (the input parameter) and the model output
        return_obj = (m.input, m.output)
        output.put(return_obj)
and
# Get unordered results
for i in range(len(tasks)):
    # Unravel the output tuple, which has the input parameter 'p' as an ID
    p, result = done_queue.get()
    results[p] = result
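As for a cleaner alternative (not part of the original example, just a sketch): a multiprocessing.Pool can express the same idea more compactly, and pool.map returns results in the same order as the inputs, so no explicit ID is strictly needed. Reusing the MyModel class from above:

from multiprocessing import Pool

def run_model(p):
    m = MyModel()
    m.input = p
    m.run()
    return (m.input, m.output)   # keep the input as an ID, as in the answer above

if __name__ == '__main__':
    with Pool(processes=4) as pool:
        results = dict(pool.map(run_model, range(10)))
    print('results: ' + str(results))

Note that Pool requires run_model to be defined at module level so it can be pickled.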

Related

Python code for Round Robin Scheduling algorithm for CPU scheduling

I need help completing the code for the Round Robin Scheduling algorithm for CPU scheduling.
Each process gets an equal share of CPU time, equal to a time quantum of 2 units.
After being processed for a time quantum, if the process still requires more computation,
it is passed back to a waiting queue.
The code should do the following:
Report the time each process is completed
Report wait times of each process in the queue
The #CODE indicates where the code is missing.
from collections import deque

time_quantum = 2

class Process:
    def __init__(self, name, arrival_time, required_time):
        self.name = name
        self.arrival_time = arrival_time
        self.required_time = required_time
        self.time_processed = 0

    def __repr__(self):
        return self.name

p0 = Process('P1', 0, 4)
p1 = Process('P2', 1, 3)
p2 = Process('P3', 2, 2)
p3 = Process('P4', 3, 1)
processes = [p0, p1, p2, p3]

end_times = {process.name: 0 for process in processes}
wait_times = {process.name: 0 for process in processes}

queue = deque()
running_proc = None        # Tracks running process in the CPU
running_proc_time = 0      # Tracks the time running process spent in the CPU

for t in range(11):
    #CODE

print(end_times)   # End times for each process
print(wait_times)  # Wait times for each process in the queue
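For what it's worth, below is one possible sketch of the missing loop body. It assumes that a process arriving at time t is queued before a process pre-empted at time t, and that wait time is counted as the number of time units a process spends in the ready queue; neither assumption is spelled out in the question, so treat this as an illustration rather than the expected solution.

for t in range(11):
    # queue any process that arrives at time t
    for process in processes:
        if process.arrival_time == t:
            queue.append(process)
    # pre-empt the running process once it has used a full time quantum
    if running_proc is not None and running_proc_time == time_quantum:
        queue.append(running_proc)
        running_proc = None
        running_proc_time = 0
    # dispatch the next process from the queue if the CPU is idle
    if running_proc is None and queue:
        running_proc = queue.popleft()
        running_proc_time = 0
    # every process still waiting in the queue accumulates one unit of wait time
    for process in queue:
        wait_times[process.name] += 1
    # run the dispatched process for one time unit
    if running_proc is not None:
        running_proc.time_processed += 1
        running_proc_time += 1
        if running_proc.time_processed == running_proc.required_time:
            end_times[running_proc.name] = t + 1
            running_proc = None
            running_proc_time = 0

Under these assumptions the four processes above finish at times 8, 10, 6 and 9 (P1..P4), with wait times of 4, 6, 2 and 5 respectively.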

How to get return value of different functions in a for loop with multiprocessing

I'm currently writing a numerical solver for current simulation. To make my code faster, I made functions that return the result of elementwise matrix multiplication, the gradient, and so on.
def mmul(A, B, procname, return_dict):
    return_dict[procname] = np.multiply(A, B)

def mgrad(A, procname, return_dict):
    return_dict[procname] = np.gradient(A/dx)

def madd(A, B, procname, return_dict):
    return_dict[procname] = A + B
Now here's the body of the code. I first make a dictionary (return_dict), store the result from each process in it, and then get the values (Vgrad, Pgrad, Psquare) from the dictionary.
for k in range(0, max_iter-1, 1):
    # 0. Firstly generate all of the auxiliary calculation arrays
    post_V, post_p, Vij_coeff = np.zeros((3, lx, ly), dtype=float)
    # Calculate carrier density of the next step
    processes = []
    #---------------------------- # Const/mtx for calculating p
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    p0 = multiprocessing.Process(target=mgrad, args=(V, Vgrad, return_dict))
    processes.append(p0)
    p0.start()
    p1 = multiprocessing.Process(target=mgrad, args=(p, Pgrad, return_dict))
    processes.append(p1); p1.start()
    p2 = multiprocessing.Process(target=mmul, args=(p, p, Psquare, return_dict))
    processes.append(p2); p2.start()
    for process in processes:
        process.join()
    Vgrad = return_dict['Vgrad']
    Pgrad = return_dict['Pgrad']
    Psquare = return_dict['Psquare']
However, this code raises the error below:
PicklingError: Can't pickle <function mgrad at 0x000002776C3614C8>: it's not the same object as __main__.mgrad
Is there any solution to get the calculated values of the functions while running with multiprocessing?
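The "it's not the same object as __main__.mgrad" pickling error typically shows up when the worker functions get redefined after the processes are created (for example, when the defining cell is re-run in an interactive session) or when the multiprocessing calls are not guarded by if __name__ == '__main__'. For comparison, here is a minimal, self-contained sketch of the same Manager-dict pattern; the array shapes and values are made up purely for illustration, and the dictionary keys are passed as strings rather than as (undefined) variables:

import multiprocessing
import numpy as np

dx = 1.0  # made-up grid spacing for illustration

# worker functions live at module top level so they can be pickled
def mgrad(A, procname, return_dict):
    return_dict[procname] = np.gradient(A / dx)

def mmul(A, B, procname, return_dict):
    return_dict[procname] = np.multiply(A, B)

if __name__ == '__main__':
    V = np.random.rand(4, 4)   # made-up inputs
    p = np.random.rand(4, 4)

    manager = multiprocessing.Manager()
    return_dict = manager.dict()

    procs = [
        multiprocessing.Process(target=mgrad, args=(V, 'Vgrad', return_dict)),
        multiprocessing.Process(target=mgrad, args=(p, 'Pgrad', return_dict)),
        multiprocessing.Process(target=mmul, args=(p, p, 'Psquare', return_dict)),
    ]
    for proc in procs:
        proc.start()
    for proc in procs:
        proc.join()

    Vgrad = return_dict['Vgrad']
    Pgrad = return_dict['Pgrad']
    Psquare = return_dict['Psquare']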

Adding values in dictionary multiprocessing

I need to add values to a dictionary using parallelism. In the code below I have used multiprocessing.Process to call a function that should write a value into the dictionary, but it didn't.
import multiprocessing
import random

manager = multiprocessing.Manager()
users = [1, 2, 3, 4, 5, 6, 7, 8, 9, 100]

def random_numer_check(user, dict):
    dict[user] = random.randint(0, 1000)

if __name__ == '__main__':
    return_dict = manager.dict()
    jobs = []
    for i in range(10):
        p = multiprocessing.Process(target=random_numer_check, args=(users, return_dict))
        jobs.append(p)
        p.start()
    for proc in jobs:
        proc.join()
I expected the result of print(return_dict) to be something like
{1: 169, 2: 520, 3: 637, 4: 559, 5: 497, 6: 470, 7: 113, 8: 221, 9: 946, 100: 69}
but it just returns an empty dictionary.
You have a couple of errors in your code. You are passing the whole users list to each process, so every call tries to use the entire list as the dictionary key; lists are unhashable, so this raises a TypeError inside each child process and nothing is ever written to the dictionary.
The code below will work, although I would advise altering your code to follow the Python style guidelines.
import multiprocessing
import random

users = [1, 2, 3, 4, 5, 6, 7, 8, 9, 100]

def random_number_check(user, dic):
    dic[user] = random.randint(0, 1000)

if __name__ == '__main__':
    # Create the Manager inside the __main__ guard so it is not re-created
    # when child processes import this module (relevant on Windows/spawn)
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    jobs = []
    for i in range(10):
        # Pass the user at position i instead of the whole list
        p = multiprocessing.Process(target=random_number_check, args=(users[i], return_dict))
        jobs.append(p)
        p.start()
    for proc in jobs:
        proc.join()
    print(return_dict)

Python multiprocessing not working as intended with fuzzywuzzy

Either my processes kick off only after the previous one finishes, or they all start (simultaneously) but without ever calling the target function. I have tried many variants, but somehow it will not behave the way many tutorials teach.
My goal is to fuzzy-match (fuzzywuzzy) an 80k-item list of text sentences, dropping unnecessary 90%+ matches while keeping the string with the most information (scorer=fuzz.token_set_ratio).
Thank you!
IDE is Anaconda Spyder 4.0, IPython 7.10.1, Python 3.7.5
# -*- coding: utf-8 -*-
import pandas as pd
import multiprocessing
import time
from datetime import datetime
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
#########
preparedDF = []
df1 = []
df2 = []
df3 = []
df4 = []
df5 = []
df6 = []
df7 = []
df8 = []
#########
xdf1 = []
xdf2 = []
xdf3 = []
xdf4 = []
xdf5 = []
xdf6 = []
xdf7 = []
xdf8 = []
#########

def fuzzyPrepare():
    #load data do some easy cleaning
    global preparedDF
    df = pd.read_csv("newEN.csv")
    df = df["description"].fillna("#####").tolist()
    df = list(dict.fromkeys(df))
    try:
        df = df.remove("#####")
    except ValueError:
        pass
    preparedDF = df

def fuzzySplit(df=preparedDF):
    #split data to feed processes
    global df1, df2, df3, df4, df5, df6, df7, df8
    df1 = df[:100]
    df2 = df[100:200]
    df3 = df[200:300]
    df4 = df[300:400]
    df5 = df[400:500]
    df6 = df[500:600]
    df7 = df[600:700]
    df8 = df[700:800]

def fuzzyMatch(x):
    #process.dedupe returns dict_keys object so pass it to a list()
    global xdf1, xdf2, xdf3, xdf4, xdf5, xdf6, xdf7, xdf8
    if x == 1:
        xdf1 = list(process.dedupe(df1, threshold=90, scorer=fuzz.token_set_ratio))
    elif x == 2:
        xdf2 = list(process.dedupe(df2, threshold=90, scorer=fuzz.token_set_ratio))
    elif x == 3:
        xdf3 = list(process.dedupe(df3, threshold=90, scorer=fuzz.token_set_ratio))
    elif x == 4:
        xdf4 = list(process.dedupe(df4, threshold=90, scorer=fuzz.token_set_ratio))
    elif x == 5:
        xdf5 = list(process.dedupe(df5, threshold=90, scorer=fuzz.token_set_ratio))
    elif x == 6:
        xdf6 = list(process.dedupe(df6, threshold=90, scorer=fuzz.token_set_ratio))
    elif x == 7:
        xdf7 = list(process.dedupe(df7, threshold=90, scorer=fuzz.token_set_ratio))
    elif x == 8:
        xdf8 = list(process.dedupe(df8, threshold=90, scorer=fuzz.token_set_ratio))
    else:
        return "error in fuzzyCases!"

#if __name__ == '__main__':
fuzzyPrepare()
fuzzySplit(preparedDF)

#UNHEEDED MULTIPROCESSING, ONLY THIS LINE TRIGGERS THE ACTUAL FUNCTION -> p1 = multiprocessing.Process(name="p1",target=fuzzyMatch(1), args=(1,))
p1 = multiprocessing.Process(name="p1", target=fuzzyMatch, args=(1,))
p2 = multiprocessing.Process(name="p2", target=fuzzyMatch, args=(2,))
p3 = multiprocessing.Process(name="p3", target=fuzzyMatch, args=(3,))
p4 = multiprocessing.Process(name="p4", target=fuzzyMatch, args=(4,))
p5 = multiprocessing.Process(name="p5", target=fuzzyMatch, args=(5,))
p6 = multiprocessing.Process(name="p6", target=fuzzyMatch, args=(6,))
p7 = multiprocessing.Process(name="p7", target=fuzzyMatch, args=(7,))
p8 = multiprocessing.Process(name="p8", target=fuzzyMatch, args=(8,))

jobs = []
jobs.append(p1)
jobs.append(p2)
jobs.append(p3)
jobs.append(p4)
jobs.append(p5)
jobs.append(p6)
jobs.append(p7)
jobs.append(p8)

for j in jobs:
    print("process " + j.name + " started at " + datetime.now().strftime('%H:%M:%S'))
    j.start()
    time.sleep(0.3)

for j in jobs:
    j.join()

print("processing complete at " + datetime.now().strftime('%H:%M:%S'))
Ok, you are dealing with a non-trivial problem here. I have taken the liberty to DRY (Don't Repeat Yourself) your code a bit. I also don't have your data or pandas installed, so I have simplified the inputs and outputs. The principles, however, are all the same, and with a few changes you should be able to make your code work!
Attempt #1
I have an array of 800 int elements, and each process is going to calculate the sum of 100 of them. Look for the # DRY: comments.
# -*- coding: utf-8 -*-
import multiprocessing
import time
from datetime import datetime

#########
number_of_proc = 8
preparedDF = []
# DRY: This is now a list of lists. This allows us to refer to df1 as dfs[1]
dfs = []
# DRY: A dict of results. The key will be int (the process number!)
xdf = {}
#########

def fuzzyPrepare():
    global preparedDF
    # Generate fake data
    preparedDF = range(number_of_proc * 100)

def fuzzySplit(df):
    #split data to feed processes
    global dfs
    # DRY: Loop and generate N lists for N processes
    for i in range(number_of_proc):
        from_element = i * 100
        to_element = from_element + 100
        print("Packing [{}, {})".format(from_element, to_element))
        dfs.append(df[from_element:to_element])

def fuzzyMatch(x):
    global xdf
    # DRY: Since we now have a dict, all the if-else is not needed any more...
    xdf[x] = sum(dfs[x])
    print("In process: x={}, xdf[{}]={}".format(x, x, xdf[x]))

if __name__ == '__main__':
    fuzzyPrepare()
    fuzzySplit(preparedDF)
    # DRY: Create N processes AND append them
    jobs = []
    for p in range(number_of_proc):
        p = multiprocessing.Process(name="p{}".format(p), target=fuzzyMatch, args=(p,))
        jobs.append(p)
    for j in jobs:
        print("process " + j.name + " started at " + datetime.now().strftime('%H:%M:%S'))
        j.start()
        time.sleep(0.3)
    for j in jobs:
        j.join()
    print("processing complete at " + datetime.now().strftime('%H:%M:%S'))
    print("results:")
    for x in range(number_of_proc):
        print("In process: x={}, xdf[{}]={}".format(x, x, xdf[x]))
Output:
Packing [0, 100)
Packing [100, 200)
Packing [200, 300)
Packing [300, 400)
Packing [400, 500)
Packing [500, 600)
Packing [600, 700)
Packing [700, 800)
process p0 started at 19:12:00
In process: x=0, xdf[0]=4950
process p1 started at 19:12:00
In process: x=1, xdf[1]=14950
process p2 started at 19:12:00
In process: x=2, xdf[2]=24950
process p3 started at 19:12:01
In process: x=3, xdf[3]=34950
process p4 started at 19:12:01
In process: x=4, xdf[4]=44950
process p5 started at 19:12:01
In process: x=5, xdf[5]=54950
process p6 started at 19:12:01
In process: x=6, xdf[6]=64950
process p7 started at 19:12:02
In process: x=7, xdf[7]=74950
processing complete at 19:12:02
results:
Traceback (most recent call last):
  File "./tmp/proctest.py", line 58, in <module>
    print("In process: x={}, xdf[{}]={}".format(x, x, xdf[x]))
KeyError: 0
What happened? I printed the values in the processing function and they were there?!
Well, I am not an expert, but a Python process works much like fork(). The basic principle is that it will spawn and initialize a new child process. The child process gets a COPY(!) of the parent's memory. This means that the parent and child processes do not share any data/memory!!!
So in our case:
We prepare our data
We create N processes
Each process has a COPY of dfs and xdf variables
While for dfs we do not care too much (since they are used only for input), each process now has its own xdf and not the parent's one! Do you see why the KeyError happens? (A tiny illustration follows.)
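A small self-contained sketch of that behaviour (not from the answer above): a global dict modified inside a child process stays empty in the parent.

import multiprocessing

shared = {}

def child():
    shared['set_in_child'] = True   # modifies the child's own copy only

if __name__ == '__main__':
    proc = multiprocessing.Process(target=child)
    proc.start()
    proc.join()
    print(shared)   # prints {} - the parent's dict is unchanged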
How to fix this (Attempt #2)
It is now obvious that we need to return data back from the process to the parent.
There are many ways of doing this, but the simplest (code-wise) is to use a multiprocessing.Manager to share data between your child processes (look for the # NEW: tags in the code; note that I have only changed 2 lines!):
# -*- coding: utf-8 -*-
import multiprocessing
import time
from datetime import datetime
# NEW: This can manage data between processes
from multiprocessing import Manager

#########
number_of_proc = 8
preparedDF = []
dfs = []
# NEW: we create a manager object to store the results
manager = Manager()
xdf = manager.dict()
#########

def fuzzyPrepare():
    global preparedDF
    # Generate fake data
    preparedDF = range(number_of_proc * 100)

def fuzzySplit(df):
    #split data to feed processes
    global dfs
    # DRY: Loop and generate N lists for N processes
    for i in range(number_of_proc):
        from_element = i * 100
        to_element = from_element + 100
        print("Packing [{}, {})".format(from_element, to_element))
        dfs.append(df[from_element:to_element])

def fuzzyMatch(x):
    global xdf
    # DRY: Since we now have a dict, all the if-else is not needed any more...
    xdf[x] = sum(dfs[x])
    print("In process: x={}, xdf[{}]={}".format(x, x, xdf[x]))

if __name__ == '__main__':
    fuzzyPrepare()
    fuzzySplit(preparedDF)
    # DRY: Create N processes AND append them
    jobs = []
    for p in range(number_of_proc):
        p = multiprocessing.Process(name="p{}".format(p), target=fuzzyMatch, args=(p,))
        jobs.append(p)
    for j in jobs:
        print("process " + j.name + " started at " + datetime.now().strftime('%H:%M:%S'))
        j.start()
        time.sleep(0.3)
    for j in jobs:
        j.join()
    print("processing complete at " + datetime.now().strftime('%H:%M:%S'))
    print("results:")
    for x in range(number_of_proc):
        print("Out of process: x={}, xdf[{}]={}".format(x, x, xdf[x]))
And the output:
Packing [0, 100)
Packing [100, 200)
Packing [200, 300)
Packing [300, 400)
Packing [400, 500)
Packing [500, 600)
Packing [600, 700)
Packing [700, 800)
process p0 started at 19:34:50
In process: x=0, xdf[0]=4950
process p1 started at 19:34:50
In process: x=1, xdf[1]=14950
process p2 started at 19:34:50
In process: x=2, xdf[2]=24950
process p3 started at 19:34:51
In process: x=3, xdf[3]=34950
process p4 started at 19:34:51
In process: x=4, xdf[4]=44950
process p5 started at 19:34:51
In process: x=5, xdf[5]=54950
process p6 started at 19:34:52
In process: x=6, xdf[6]=64950
process p7 started at 19:34:52
In process: x=7, xdf[7]=74950
processing complete at 19:34:52
results:
Out of process: x=0, xdf[0]=4950
Out of process: x=1, xdf[1]=14950
Out of process: x=2, xdf[2]=24950
Out of process: x=3, xdf[3]=34950
Out of process: x=4, xdf[4]=44950
Out of process: x=5, xdf[5]=54950
Out of process: x=6, xdf[6]=64950
Out of process: x=7, xdf[7]=74950
Read more about this in the Python multiprocessing documentation (the section on sharing state between processes), and note the warning about a Manager being slower than a multiprocessing.Array (which would actually also solve your problem here).
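As an aside (a sketch, not part of the answer above): a multiprocessing.Pool sidesteps shared state altogether, because pool.map sends the results back to the parent on its own. Using the same fake data, with the real fuzzywuzzy call left as a comment:

import multiprocessing

number_of_proc = 8

def work(chunk):
    # for the real workload this would be something like
    # list(process.dedupe(chunk, threshold=90, scorer=fuzz.token_set_ratio))
    return sum(chunk)

if __name__ == '__main__':
    data = range(number_of_proc * 100)
    chunks = [data[i * 100:(i + 1) * 100] for i in range(number_of_proc)]
    with multiprocessing.Pool(number_of_proc) as pool:
        results = pool.map(work, chunks)   # results come back in chunk order
    for x, r in enumerate(results):
        print("x={}, result={}".format(x, r))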

QThread locking up GUI - PySide

I am trying to run a process in a separate thread, but it is freezing my GUI and I can't understand why.
I am initialising the thread in the __init__ method of my class:
self.cipher = Cipher()
self.cipher_thread = QThread()
self.cipher.moveToThread(self.cipher_thread)
self.cipher_thread.started.connect(lambda: self.cipher.encrypt(self.plaintext_file_path,
                                                               self.ciphertext_file_path,
                                                               self.init_vector,
                                                               self.key))
self.cipher_thread.start()
The encrypt method of the cipher class is:
def encrypt(self):
    # check that both the key and the initialisation vector are 16 bytes long
    if len(self.k) == self.key_byte_length and len(self.init_vector) == self.byte_length:
        if not self.used:
            self.used = True
            # get the padding bytes and store in a list
            self.padding_bytes = self.__getPaddingBytes()
            # generate sub keys
            # initial subkey is first four words from key
            init_subkey_words = []
            for i in range(0, self.key_byte_length-3, 4):
                init_subkey_words.append(self.k[i:i+4])
            self.__genSubKeys(init_subkey_words)
            # read file and append the padding to it
            with open(self.plaintext_file_path, 'rb') as f:
                self.plaintext_data = bytearray(f.read())
            self.plaintext_data += self.padding_bytes
            # set total size
            self.total_size_bytes = len(self.plaintext_data)
            # insert the initialisation vector as the first 16 bytes in the ciphertext data
            self.ciphertext_data = self.init_vector
            '''
            begin encryption
            --------------------------------------------------------------------------------------------------------
            '''
            self.start_time = datetime.datetime.now()
            # loop through the file 16 bytes at a time
            for i in range(0, int(len(self.plaintext_data)), self.byte_length):  # i increases by 16 each loop
                # if self.block_time is not None:
                #     print('block time is', datetime.datetime.now()-self.block_time)
                self.block_time = datetime.datetime.now()
                # set the 16 byte state - bytearray Object
                state = copy.deepcopy(self.plaintext_data[i:i+self.byte_length])
                # xor the state with the initialisation vector and first subkey
                for j in range(self.byte_length):
                    state[j] ^= self.init_vector[j]
                    state[j] ^= self.sub_keys[0][j]
                # round start
                # --------------------------------------------------------------------------------------------------
                for j in range(self.num_rounds):
                    self.current_round += 1  # increment current round counter
                    '''
                    arrange the data into a 4x4 matrix
                    [[1, 5, 9, 13],
                     [2, 6, 10, 14],
                     [3, 7, 11, 15],
                     [4, 8, 12, 16]]
                    '''
                    state_matrix = np.array(state)
                    state_matrix.resize(4, 4)
                    state_matrix.swapaxes(0, 1)
                    # byte substitution
                    # ----------------------------------------------------------------------------------------------
                    for row in state_matrix:
                        for byte in row:
                            byte = self.__sBoxSubstitution(byte)
                    # shift row - row k shifts left k places
                    # ----------------------------------------------------------------------------------------------
                    state_matrix = state_matrix.tolist()
                    for row in range(1, 4):
                        for l in range(0, row):
                            state_matrix[row].append(state_matrix[row].pop(0))
                    state_matrix = np.array(state_matrix)
                    # mix column - not included in last round
                    # ----------------------------------------------------------------------------------------------
                    if self.current_round is not self.num_rounds:
                        # swap axes of state matrix
                        state_matrix.swapaxes(0, 1)
                        # create temporary holder for the computed values
                        mixed_col_bytes = [[], [], [], []]
                        for k in range(4):
                            for l in range(4):
                                mixed_col_bytes[k].append(
                                    self.__GFMult(self.MIX_COL_MATRIX[l][0], state_matrix[k][0]) ^
                                    self.__GFMult(self.MIX_COL_MATRIX[l][1], state_matrix[k][1]) ^
                                    self.__GFMult(self.MIX_COL_MATRIX[l][2], state_matrix[k][2]) ^
                                    self.__GFMult(self.MIX_COL_MATRIX[l][3], state_matrix[k][3]))
                        # restore state matrix from temporary holder and swap axes back
                        state_matrix = np.array(copy.deepcopy(mixed_col_bytes))
                        state_matrix.swapaxes(0, 1)
                    # restore single bytearray state
                    state_matrix = state_matrix.flatten()
                    state_matrix = state_matrix.tolist()
                    state = bytearray(state_matrix)
                    # key addition
                    # ----------------------------------------------------------------------------------------------
                    for k in range(self.byte_length):
                        state[k] ^= self.sub_keys[self.current_round][k]
                self.ciphertext_data += state  # append state to ciphertext data
                self.init_vector = self.ciphertext_data[-16:]  # update the initialisation vector
                self.current_round = 0  # reset current round number
                self.completed_size_bytes += self.byte_length
                self.percent_done = (self.completed_size_bytes/self.total_size_bytes)*100
                self.updateProgressSig.emit(int(self.percent_done))
            # finish encryption
            self.__saveEncryptedData()
            print('total encryption time:', datetime.datetime.now() - self.start_time)
            # finish
            self.finish(self.ciphertext_file_path)
    # either the key or the initialisation vector is not the correct length
    else:
        print('either the key length or initialisation vector is the wrong length')
        print('---')
        print('key length:', len(self.k))
        print('iv length:', len(self.init_vector))
The issue you are experiencing is that the function you are connecting to the started signal is not run in the thread; it is run in the context of where it was set, which seems to be your UI thread.
Normally you would want to create a custom class which inherits from QThread, and any code that you want to be executed would be in the run() function of that class. Like so:
class MyTask(QThread):
    def __init__(self):
        QThread.__init__(self)

    def run(self):
        print("Code to run in the thread goes here.")
If that seems like overkill you could just set the value of self.cipher_thread.run to your own function. Here's an example:
import time
from PySide.QtCore import QThread
from PySide import QtGui

app = QtGui.QApplication("")

def main():
    task = SomeTask()
    thread = QThread()
    # Just some variables to pass into the task
    a, b, c = (1, 2, 3)
    thread.run = lambda: task.runTask(a, b, c)
    print("Starting thread")
    thread.start()
    # Doing this so the application does not exit while we wait for the thread to complete.
    while not thread.isFinished():
        time.sleep(1)
    print("Thread complete")

class SomeTask():
    def runTask(self, a, b, c):
        print(a, b, c)
        print("runTask Started")
        time.sleep(5)
        print("runTask Complete")

if __name__ == "__main__":
    main()
As Ekhumoro suggested, I was running into issues with the GIL. Using the multiprocessing module has worked for me.
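For anyone going the same way, one common pattern (a sketch under assumptions, not the poster's actual code; the file name and widget are made up) is to push the CPU-bound work into a multiprocessing.Process and poll a Queue from the GUI thread with a QTimer, so that neither the GIL nor the worker blocks the event loop:

import multiprocessing
from PySide import QtGui
from PySide.QtCore import QTimer

def encrypt_worker(plaintext_path, result_queue):
    # stand-in for the CPU-bound encryption; it runs in a separate process,
    # so the GUI process's GIL is never involved
    result_queue.put(plaintext_path + '.enc')

class Window(QtGui.QWidget):
    def __init__(self):
        QtGui.QWidget.__init__(self)
        self.result_queue = multiprocessing.Queue()
        self.worker = multiprocessing.Process(target=encrypt_worker,
                                              args=('somefile.txt', self.result_queue))
        self.worker.start()
        # poll the queue from the GUI thread instead of blocking on it
        self.poll_timer = QTimer(self)
        self.poll_timer.timeout.connect(self.checkResult)
        self.poll_timer.start(100)

    def checkResult(self):
        if not self.result_queue.empty():
            print('finished:', self.result_queue.get())
            self.poll_timer.stop()
            self.worker.join()

if __name__ == '__main__':
    app = QtGui.QApplication([])
    win = Window()
    win.show()
    app.exec_()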
