Scheduling of GNURadio [general_] work function for custom source block - python

I'm trying to implement a GNURadio source block in Python which has to produce a vector of a fixed size at each call of the [general_]work function.
As a first toy example I tried to output a vector of constant values which should change at each call of the [general_]work function.
import numpy
import sys

from gnuradio import gr

class my_source_vf(gr.sync_block):
    """
    docstring for block
    """
    def __init__(self, v_size):
        self.v_size = v_size
        self.mult = 1
        self.buff = numpy.ones(v_size)
        gr.sync_block.__init__(self,
            name="my_source_vf",
            in_sig=None,
            #out_sig=[numpy.float32])
            out_sig=[(numpy.float32, self.v_size)])

    def work(self, input_items, output_items):
        # <+signal processing here+>
        print len(output_items)
        out = output_items[0]
        out[0][:] = self.buff*self.mult
        self.mult = self.mult+1
        return self.v_size
However, when I connect it to a QT GUI Vector Sink block I just see oscillations between 0 and 1, which leads me to think the [general_]work function is called just once.

You mustn't return v_size – that's the length of one item. You should return the number of items you've produced during this call.
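For reference, a minimal sketch of a corrected work function (assuming the block is otherwise as in the question: with out_sig=[(numpy.float32, v_size)], output_items[0] is a 2-D array of shape (noutput_items, v_size)):

def work(self, input_items, output_items):
    out = output_items[0]
    # Fill every requested output item (one vector each), bumping the
    # multiplier once per vector so consecutive vectors differ.
    for i in range(len(out)):
        out[i][:] = self.buff * self.mult
        self.mult += 1
    # Return the number of items produced, not the item (vector) length.
    return len(out)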


Writing classes that simulate random walks

Regarding a question that I asked yesterday: How can I implement (I think via inheritance) a new class, with a parameter of the parent class, and a parameter which comes from another module?
Now my task continues this way:
Create a method of Simulation called get_next_event that chooses a neighboring node of the current walker's position uniformly at random and draws a waiting time from an exponential distribution with parameter lambda_. The method should then return the time of the next move and the next position of the walker.
My code now looks like this:
import networkx as nx
import random

class RandomWalker:
    def __init__(self, position=0):
        self.position = position

class Simulation:
    def __init__(self, random_walker, G, lambda_, t_end):
        self.random_walker = random_walker
        self.G = G
        self.lambda_ = lambda_
        self.t_end = t_end

G = nx.Graph()
random_walker = RandomWalker(position)
simulation = Simulation(random_walker, G, lambda_, t_end)

def get_next_event(self, G, lambda_):
    neighbors = list(G.neighbors(random_walker))
    next_event = random.choice(neighbors)
    position = next_event
    waiting_time = np.random.exponential(lambda_)
    return 'Waiting time: '+waiting_time+ '. New position: '+position
Since I'm new to creating classes (I'm on Python 3.8.1 in Spyder), I still get errors in my code such as undefined name 'position' on the line random_walker = RandomWalker(position), and similar ones afterwards, even though those names appear in the class initializers.
Now, I didn't understand whether, when using variables that were defined in a class's initialization, I have to write RandomWalker.position, maybe?
Any help is appreciated, thank you.
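A minimal sketch of one way to restructure this, with get_next_event as a method of Simulation that reads its state through self. The current-time attribute t and the example graph are assumptions of mine, since the task only specifies what the method returns; note also that numpy parametrises the exponential distribution by its scale, i.e. 1/lambda_ if lambda_ is a rate:

import random
import networkx as nx
import numpy as np

class RandomWalker:
    def __init__(self, position=0):
        self.position = position

class Simulation:
    def __init__(self, random_walker, G, lambda_, t_end):
        self.random_walker = random_walker
        self.G = G
        self.lambda_ = lambda_
        self.t_end = t_end
        self.t = 0.0  # assumed: current simulation time

    def get_next_event(self):
        # Read instance state via self instead of module-level names.
        neighbors = list(self.G.neighbors(self.random_walker.position))
        next_position = random.choice(neighbors)
        waiting_time = np.random.exponential(1.0 / self.lambda_)
        return self.t + waiting_time, next_position

# Usage: build the graph and the walker before instantiating Simulation.
G = nx.path_graph(5)
simulation = Simulation(RandomWalker(position=2), G, lambda_=1.0, t_end=10.0)
print(simulation.get_next_event())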

How do I preserve the colours in a STEP file when modifying the geometry in Open Cascade?

I'm writing a script in python using Open Cascade Technology (using the pyOCCT package for Anaconda) to import STEP files, defeature them procedurally and re-export them. I want to preserve the product hierarchy, names and colours as much as possible. Currently the script can import STEP files, simplify all of the geometry while roughly preserving the hierarchy and re-export the step file. The problem is no matter how I approach the problem, I can't manage to make it preserve the colours of the STEP file in a few particular cases.
Here's the model I pass in to the script, and the result of the simplification (screenshots omitted): the simplification has worked correctly, but the colours of some of the bodies were not preserved. The common thread is that the bodies that lose their colours are children of products which only have other bodies as their children (i.e. they don't contain sub-products).
This seems to be related to the way that Open Cascade translates STEP files on import: such products are imported as SimpleShapes rather than as Assemblies (see below).
Alright, now for some code:
from OCCT.STEPControl import STEPControl_Reader, STEPControl_Writer, STEPControl_AsIs
from OCCT.BRepAlgoAPI import BRepAlgoAPI_Defeaturing
from OCCT.TopAbs import TopAbs_FACE, TopAbs_SHAPE, TopAbs_COMPOUND
from OCCT.TopExp import TopExp_Explorer
from OCCT.ShapeFix import ShapeFix_Shape
from OCCT.GProp import GProp_GProps
from OCCT.BRepGProp import BRepGProp
from OCCT.TopoDS import TopoDS
from OCCT.TopTools import TopTools_ListOfShape
from OCCT.BRep import BRep_Tool
from OCCT.Quantity import Quantity_ColorRGBA
from OCCT.ShapeBuild import ShapeBuild_ReShape
from OCCT.STEPCAFControl import STEPCAFControl_Reader, STEPCAFControl_Writer
from OCCT.XCAFApp import XCAFApp_Application
from OCCT.XCAFDoc import XCAFDoc_DocumentTool, XCAFDoc_ColorGen, XCAFDoc_ColorSurf
from OCCT.XmlXCAFDrivers import XmlXCAFDrivers
from OCCT.TCollection import TCollection_ExtendedString
from OCCT.TDF import TDF_LabelSequence
from OCCT.TDataStd import TDataStd_Name
from OCCT.TDocStd import TDocStd_Document
from OCCT.TNaming import TNaming_NamedShape
from OCCT.Interface import Interface_Static
# DBG
def export_step(shape, path):
    writer = STEPControl_Writer()
    writer.Transfer(shape, STEPControl_AsIs)
    writer.Write(path)

# DBG
def print_shape_type(label, shapeTool):
    if shapeTool.IsFree_(label):
        print("Free")
    if shapeTool.IsShape_(label):
        print("Shape")
    if shapeTool.IsSimpleShape_(label):
        print("SimpleShape")
    if shapeTool.IsReference_(label):
        print("Reference")
    if shapeTool.IsAssembly_(label):
        print("Assembly")
    if shapeTool.IsComponent_(label):
        print("Component")
    if shapeTool.IsCompound_(label):
        print("Compound")
    if shapeTool.IsSubShape_(label):
        print("SubShape")
# Returns a ListOfShape containing the faces to be removed in the defeaturing.
# NOTE: For conciseness I've simplified this algorithm and as such it *MAY* not produce exactly
# the same output as shown in the screenshots, but it should still do SOME simplification.
def select_faces(shape):
    exp = TopExp_Explorer(shape, TopAbs_FACE)
    selection = TopTools_ListOfShape()
    nfaces = 0
    while exp.More():
        rgb = None
        s = exp.Current()
        exp.Next()
        nfaces += 1
        face = TopoDS.Face_(s)
        gprops = GProp_GProps()
        BRepGProp.SurfaceProperties_(face, gprops)
        area = gprops.Mass()
        surf = BRep_Tool.Surface_(face)
        if area < 150:
            selection.Append(face)
            #log(f"\t\tRemoving face with area: {area}")
    return selection, nfaces
# Performs the defeaturing
def simplify(shape):
    defeaturer = BRepAlgoAPI_Defeaturing()
    defeaturer.SetShape(shape)
    sel = select_faces(shape)
    if sel[0].Extent() == 0:
        return shape
    defeaturer.AddFacesToRemove(sel[0])
    defeaturer.SetRunParallel(True)
    defeaturer.SetToFillHistory(False)
    defeaturer.Build()
    if not defeaturer.IsDone():
        return shape  # TODO: Handle errors
    return defeaturer.Shape()
# Given the label of an entity, finds its displayed colour. If the entity has no defined colour,
# the parents are searched for defined colours as well.
def find_color(label, colorTool):
    col = Quantity_ColorRGBA()
    status = False
    while not status and label != None:
        try:
            status = colorTool.GetColor(label, XCAFDoc_ColorSurf, col)
        except:
            break
        label = label.Father()
    return (col.GetRGB().Red(), col.GetRGB().Green(), col.GetRGB().Blue(), col.Alpha(), status, col)
# Finds all child shapes and simplifies them recursively. Returns true if there were any subshapes.
# For now this assumes all shapes passed into this are translated as "SimpleShape".
# "Assembly" entities should be skipped as we don't need to touch them; "Compound" entities should
# work with this as well, though the behaviour is untested.
# Use the print_shape_type(shapeLabel, shapeTool) method to identify a shape.
def simplify_subshapes(shapeLabel, shapeTool, colorTool, set_colours=None):
    labels = TDF_LabelSequence()
    shapeTool.GetSubShapes_(shapeLabel, labels)
    #print_shape_type(shapeLabel, shapeTool)
    #print(f"{shapeTool.GetShape_(shapeLabel).ShapeType()}")
    cols = {}
    for i in range(1, labels.Length()+1):
        label = labels.Value(i)
        currShape = shapeTool.GetShape_(label)
        print(f"\t{currShape.ShapeType()}")
        if currShape.ShapeType() == TopAbs_COMPOUND:
            # This code path should never be taken as far as I understand
            simplify_subshapes(label, shapeTool, colorTool, set_colours)
        else:
            ''' See the comment at the bottom of the main loop for an explanation of the function of this block
            col = find_color(label, colorTool)
            #print(f"{name} RGBA: {col[0]:.5f} {col[1]:.5f} {col[2]:.5f} {col[3]:.5f} defined={col[4]}")
            cols[label.Tag()] = col
            if set_colours != None:
                colorTool.SetColor(label, set_colours[label.Tag()][5], XCAFDoc_ColorSurf)'''
            # Doing both of these things seems to result in colours being reset but the geometry doesn't get replaced
            nshape = simplify(currShape)
            shapeTool.SetShape(label, nshape) # This doesn't work
    return labels.Length() > 0, cols
# Set up XCAF document
app = XCAFApp_Application.GetApplication_()
fmt = TCollection_ExtendedString('MDTV-XCAF')
doc = TDocStd_Document(fmt)
app.InitDocument(doc)
shapeTool = XCAFDoc_DocumentTool.ShapeTool_(doc.Main())
colorTool = XCAFDoc_DocumentTool.ColorTool_(doc.Main())

# Import the STEP file
reader = STEPCAFControl_Reader()
reader.SetNameMode(True)
reader.SetColorMode(True)
Interface_Static.SetIVal_("read.stepcaf.subshapes.name", 1) # Tells the importer to import subshape names
reader.ReadFile("testcolours.step")
reader.Transfer(doc)

labels = TDF_LabelSequence()
shapeTool.GetShapes(labels)

# Simplify each shape that was imported
for i in range(1, labels.Length()+1):
    label = labels.Value(i)
    shape = shapeTool.GetShape_(label)

    # Assemblies are just made of other shapes, so we'll skip this and simplify them individually...
    if shapeTool.IsAssembly_(label):
        continue

    # This function call here is meant to be the fix for the bug described.
    # The idea was to check if the TopoDS_Shape we're looking at is a COMPOUND and if so we would
    # simplify and call SetShape() on each of the sub-shapes instead, in an attempt to preserve
    # the colours stored in the sub-shapes' labels.
    #status, loadedCols = simplify_subshapes(label, shapeTool, colorTool)
    #if status:
    #    continue

    shape = simplify(shape)
    shapeTool.SetShape(label, shape)

    # The code gets a bit messy here because this was another attempt at fixing the problem by
    # building a dictionary of colours before the shapes were simplified and then resetting the
    # colours of each subshape after simplification. This didn't work either.
    # The idea was to call this function once to generate the dictionary, then simplify, then
    # call it again passing in the dictionary so it could be re-applied.
    #if status:
    #    simplify_subshapes(label, shapeTool, colorTool, loadedCols)

shapeTool.UpdateAssemblies()

# Re-export
writer = STEPCAFControl_Writer()
Interface_Static.SetIVal_("write.step.assembly", 2)
Interface_Static.SetIVal_("write.stepcaf.subshapes.name", 1)
writer.Transfer(doc, STEPControl_AsIs)
writer.Write("testcolours-simplified.step")
There's a lot of stuff here for a minimal reproducible example, but the general flow of the program is that we import the STEP file:
reader.ReadFile("testcolours.step")
reader.Transfer(doc)
Then we iterate through each label in the file (essentially every node in the tree):
labels = TDF_LabelSequence()
shapeTool.GetShapes(labels)

# Simplify each shape that was imported
for i in range(1, labels.Length()+1):
    label = labels.Value(i)
    shape = shapeTool.GetShape_(label)
We skip any labels marked as assemblies since they contain children and we only want to simplify individual bodies. We then call simplify(shape), which performs the simplification and returns a new shape, and then call shapeTool.SetShape() to bind the new shape to the old label.
The thing that doesn't work here is that, as explained, Component3 and Component4 don't get marked as Assemblies; they are treated as SimpleShapes, and when they are simplified as one shape the colours are lost.
One solution I attempted was to call a method simplify_subshapes() which would iterate through each of the subshapes and do the same thing as the main loop, simplifying them and then calling SetShape(). This ended up being even worse, as it resulted in those bodies not being simplified at all but still losing their colours.
I also attempted to use the simplify_subshapes() method to build a dictionary of all the colours of the subshapes, then simplify the COMPOUND shape, then call the same method again to re-apply the colours to the subshapes using the dictionary (the code for this is commented out, with an explanation of what it did).
col = find_color(label, colorTool)
#print(f"{name} RGBA: {col[0]:.5f} {col[1]:.5f} {col[2]:.5f} {col[3]:.5f} defined={col[4]}")
cols[label.Tag()] = col

if set_colours != None:
    colorTool.SetColor(label, set_colours[label.Tag()][5], XCAFDoc_ColorSurf)
As far as I can see, the issue could be resolved either by getting Open Cascade to import Component3 and Component4 as Assemblies, OR by finding a way to make SetShape() work as intended on subshapes.
Here's a link to the test file:
testcolours.step

scipy.minimize -- get cost function vs iteration?

Is there any way to access the cost function on a per-iteration basis with scipy.minimize without using the callback and re-executing the cost function?
options.disp seems to be intended to do this, but only causes the optimizer to print the termination message.
It would be fine to print it to stdout and use contextlib.redirect_stdout with io.StringIO to gather it and parse through the data after, but I can't find a way to efficiently access the cost function on each iteration.
The method least_squares does that with the parameter verbose=2. However, it is not a general-purpose minimizer; its purpose is to minimize the sum of squares of the given functions. Example:
from scipy.optimize import least_squares
least_squares(lambda x: [x[0]*x[1]-6, x[0]+x[1]-5], [0, 0], verbose=2)
For other methods, like minimize, there is no such option. Instead of using a callback and re-evaluating the cost function, you may want to add some logging to the function itself. For example, here fun appends the computed values to the global variable cost_values:
from scipy.optimize import minimize

def fun(x):
    c = x[0]**2 - 2*x[0] + x[1]**4
    cost_values.append(c)
    return c

cost_values = []
minimize(fun, [3, 2])
print(cost_values)
In this example there are 4 similar function values for each iteration step, because the minimization algorithm looks around, computing the approximate Jacobian and/or Hessian. So print(cost_values[::4]) would be a way to get one value of the cost function per step.
But it's not always 4 values per step (it depends on the dimension and the method used), so it's better to use a callback function to log the costs after each step. The current cost should be stored in a global variable, so it does not have to be recomputed.
def fun(x):
    global current_cost
    current_cost = x[0]**2 - 2*x[0] + x[1]**4
    return current_cost

def log_cost(x):
    cost_values.append(current_cost)

cost_values = []
minimize(fun, [3, 2], callback=log_cost)
print(cost_values)
This prints
[3.5058199763814986, -0.2358850818406083, -0.56104822688320077, -0.88774448831043995, -0.96018358963745964, -0.98750765702936738, -0.99588975368993771, -0.99867208501468863, -0.99956795994852465, -0.99985981414137615, -0.99995446605426996, -0.99998521591611178, -0.99999519917089297, -0.99999844105574265, -0.99999949379700426, -0.99999983560485239, -0.99999994662329761, -0.99999998266175671]
I figured out a sort of hack using stdlib features. It uses a "deep" redirect of sys.stdout. Note that this does not work with Jupyter since IPython hijacks sys.stdout, which removes the .fileno attribute.
It may be possible to patch Jupyter using a tempfile.SpooledTemporaryFile in this way, removing this issue. I don't know.
Because this uses OS-level file descriptors, I believe it is also not thread-safe.
import os
import sys
import tempfile

class forcefully_redirect_stdout(object):
    ''' Forces stdout to be redirected, for both python code and C/C++/Fortran
        or other linked libraries. Useful for scraping values from e.g. the
        disp option for scipy.optimize.minimize.
    '''
    def __init__(self, to=None):
        ''' Creates a new forcefully_redirect_stdout context manager.

        Args:
            to (`None` or `str`): what to redirect to. If type(to) is None,
                internally uses a tempfile.SpooledTemporaryFile and returns a UTF-8
                string containing the captured output. If type(to) is str, opens a
                file at that path and pipes output into it, erasing prior contents.

        Returns:
            `str` if type(to) is None, else returns `None`.
        '''
        # initialize where we will redirect to and a file descriptor for python
        # stdout -- sys.stdout is used by python, while os.fd(1) is used by
        # C/C++/Fortran/etc
        self.to = to
        self.fd = sys.stdout.fileno()
        if self.to is None:
            self.to = tempfile.SpooledTemporaryFile(mode='w+b')
        else:
            self.to = open(to, 'w+b')

        self.old_stdout = os.fdopen(os.dup(self.fd), 'w')
        self.captured = ''

    def __enter__(self):
        self._redirect_stdout(to=self.to)
        return self

    def __exit__(self, *args):
        self._redirect_stdout(to=self.old_stdout)
        self.to.seek(0)
        self.captured = self.to.read().decode('utf-8')
        self.to.close()

    def _redirect_stdout(self, to):
        sys.stdout.close()  # implicit flush()
        os.dup2(to.fileno(), self.fd)  # fd writes to 'to' file
        sys.stdout = os.fdopen(self.fd, 'w')  # Python writes to fd

if __name__ == '__main__':
    import re
    from scipy.optimize import minimize

    def foo(x):
        return 1/(x+0.001)**2 + x

    with forcefully_redirect_stdout() as txt:
        result = minimize(foo, [100], method='L-BFGS-B', options={'disp': True})

    print('this appears before `disp` output')
    print("here's the output from disp:")
    print(txt.captured)

    lines_with_cost_function_values = \
        re.findall(r'At iterate\s*\d\s*f=\s*-*?\d*.\d*D[+-]\d*', txt.captured)

    fortran_values = [s.split()[-1] for s in lines_with_cost_function_values]
    # fortran uses "D" to denote double and "raw" exp notation,
    # fortran value 3.0000000D+02 is equivalent to
    # python value 3.0000000E+02 with double precision
    python_vals = [float(s.replace('D', 'E')) for s in fortran_values]
    print(python_vals)

Storing output from Python function necessary despite not using output

I am trying to understand why I must store the output of a Python function (regardless of the name of the variable I use, and regardless of whether I subsequently use that variable). I think this is a general Python question and not specific to the software NEURON, so I am asking it here on Stack Overflow.
The line of interest is here:
clamp_output = attach_current_clamp(cell)
If I just write attach_current_clamp(cell), without storing the output of the function in a variable, the code does not work (the plot is empty), and yet I don't use clamp_output at all. Why can't I just call the function? Why must I store the output in a variable even though I never use it?
import sys
import numpy
sys.path.append('/Applications/NEURON-7.4/nrn/lib/python')
from neuron import h, gui
from matplotlib import pyplot

# SET UP CELL
class SingleCell(object):
    def __init__(self):
        self.soma = h.Section(name='soma', cell=self)
        self.soma.L = self.soma.diam = 12.6517
        self.all = h.SectionList()
        self.all.wholetree(sec=self.soma)
        self.soma.insert('pas')
        self.soma.e_pas = -65
        for sec in self.all:
            sec.cm = 20

# CURRENT CLAMP
def attach_current_clamp(cell):
    stim = h.IClamp(cell.soma(1))
    stim.delay = 100
    stim.dur = 300
    stim.amp = 0.2
    return stim

cell = SingleCell()

# IF I CALL THIS FUNCTION WITHOUT STORING THE OUTPUT, THEN IT DOES NOT WORK
clamp_output = attach_current_clamp(cell)

# RECORD AND PLOT
soma_v_vec = h.Vector()
t_vec = h.Vector()
soma_v_vec.record(cell.soma(0.5)._ref_v)
t_vec.record(h._ref_t)

h.tstop = 800
h.run()

pyplot.figure(figsize=(8,4))
soma_plot = pyplot.plot(t_vec, soma_v_vec)
pyplot.show()
This is a NEURON+Python specific bug/feature. It has to do with Python garbage collection and the way NEURON implements the Python-HOC interface.
When there are no more references to a NEURON object (e.g. the IClamp) from within Python or HOC, the object is removed from NEURON.
Saving the IClamp as a property of the cell averts the problem in the same way as saving the result, so that could be an option for you:
# In __init__:
self.IClamps = []

# In attach_current_clamp:
stim.amp = 0.2
cell.IClamps.append(stim)
#return stim
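Put together, a minimal sketch of the modified pieces (keeping the names from the question, and assuming the same from neuron import h as above):

class SingleCell(object):
    def __init__(self):
        self.soma = h.Section(name='soma', cell=self)
        self.soma.L = self.soma.diam = 12.6517
        self.soma.insert('pas')
        self.soma.e_pas = -65
        self.IClamps = []  # holds references so NEURON keeps the point processes alive

def attach_current_clamp(cell):
    stim = h.IClamp(cell.soma(1))
    stim.delay = 100
    stim.dur = 300
    stim.amp = 0.2
    cell.IClamps.append(stim)  # the reference now lives on the cell, not the caller

cell = SingleCell()
attach_current_clamp(cell)  # no stored return value needed; the clamp persists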

MPI4PY Python Error 11 Creating too many threads

I am working with some code in Python and MPI4PY that is throwing a strange error. When I try to run the code below, it throws the following:
ERROR; return code from pthread_create() is 11
Error detail: Resource temporarily unavailable
sh: fork: retry: Resource temporarily unavailable
/home/sfortney/anaconda/lib/python2.7/site-packages/numexpr/cpuinfo.py:40: UserWarning: [Errno 11] Resource temporarily unavailable
warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
I scaled up this code from a simpler, working MPI4PY script, which I have also posted below. From my research on this error it seems that I am creating too many threads. This seems odd to me, as I am not using any threading, just multiple processes (my basic understanding is that threads are an intra-core phenomenon which wouldn't be touched if I was just calling multiple cores and doing one thing on each; sorry if this is not true).
I can't make sense of why the code at the bottom works perfectly while the code immediately below, which uses the same structure, does not. Why would the code below be running into thread constraints? And where in the code is it even calling multiple threads?
I have posted the whole code below for reproducibility of the error. If it is relevant, I am running this on a 32-core Linux box.
#to run this call "mpiexec -n 10 python par_implement_wavefront.py" in terminal
from __future__ import division
import pandas as pd
import numpy as np
import itertools
import os
from itertools import chain, combinations
from operator import add
from collections import Counter

home="/home/sfortney"
np.set_printoptions(precision=2, suppress=True)

#choose dimensionality and granularity
dim=2
gran=5

from mpi4py import MPI
from mpi4py.MPI import ANY_SOURCE
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

command_buffer = np.zeros(3) # first entry is boolean, second is tuple with objective function inputs, third is array index
result_buffer=np.zeros(3) # first position is node,

if rank==0:
    #defining all of our functions we will need on the root node first

    #makes ax1 into the axes of an n dim array
    def axis_fitter(arr, dim, gran, start=1, stop=101):
        ax1=np.linspace(start,stop, num=gran)
        for i in range(dim):
            indexlist=[0]*dim
            indexlist[i]= slice(None)
            arr[indexlist]=ax1
        return arr

    #this is used to make the inital queues
    #fix me to work with nan's!
    def queue_init(arr):
        queue=[]
        queueposs=[]
        queuedone=np.argwhere(arr >0).tolist()
        return queue,queueposs,queuedone

    #this is used in the queue updating function
    def queue_sorter(queue):
        queue.sort(key=lambda x: np.linalg.norm(np.array(x)))
        # using the L1 norm
        # queue.sort(key=lambda x: sum(x))
        return queue

    #this finds all the indicies to the "back" of our box
    def back_index(dim):
        standardbasis=[]
        for i in range(dim):
            vec=[0]*dim
            vec[i]=vec[i]+1
            standardbasis.append(vec)

        powerset=[]
        for z in chain.from_iterable(combinations(standardbasis,r) for r in range(len(standardbasis)+1)):
            powerset.append(z)

        powersetnew=[]
        for i in range(len(powerset)):
            powersetnew.append([sum(x) for x in zip(*list(powerset[i]))])
        powersetnew.remove([])
        powersetnew=[[i*(-1) for i in x] for x in powersetnew]
        return powersetnew

    #this takes a completed index and updates our queue of possible values
    #as well as our done queue
    def queue_update(queue,queueposs,queuedone, arr,dim,comp_idx=[0,0]):
        queuedone.append(comp_idx)
        if comp_idx==[0,0]:
            init_index=[1]*dim
            queue.append(init_index)
            for i in range(dim):
                poss_index=[1]*dim
                poss_index[i]=2
                queueposs.append(poss_index)
            return queue,queueposs,queuedone
        else:
            queuedone.append(comp_idx)
            try:
                queueposs.remove(comp_idx)
            except:
                pass
            for i in range(dim):
                new_idx=comp_idx[:]
                new_idx[i]=new_idx[i]+1
                back_list=back_index(dim)
                back_list2=[]
                for x in back_list:
                    back_list2.append(list(np.add(np.asarray(new_idx),np.asarray(x))))
                if set(tuple(x) for x in back_list2).issubset(set(tuple(x) for x in queuedone)):
                    queueposs.append(new_idx)
            queueposs=list(set(tuple(x) for x in queueposs)-set(tuple(x) for x in queuedone))
            queueposs=[list(x) for x in queueposs]
            queueposs=queue_sorter(queueposs)
            try:
                for x in range(len(queueposs)):
                    queueappender=(queueposs).pop(x)
                    queue.append(queueappender)
            except:
                print "queueposs empty"
            queue=queue_sorter(queue)
            return queue,queueposs,queuedone

    #this function makes it so we dont have to pass the whole array through MPI but only the pertinent information
    def objectivefuncprimer(arr, queue_elem, dim):
        inputs=back_index(dim)
        inputs2=[]
        for x in inputs:
            inputs2.append(list(np.add(np.asarray(queue_elem),np.asarray(x))))
        inputs3=[]
        for x in range(len(inputs2)):
            inputs3.append(arr[tuple(inputs2[x])])
        return inputs3

    #this function takes a value and an index and assigns the array that value at the index
    def arrupdater(val,idx):
        arr[tuple(idx)]=val
        return arr, idx

    #########Initializing
    all_finished=False

    #make our empty array
    sizer=tuple([gran]*dim)
    arr=np.zeros(shape=sizer)
    nodes_avail=range(1, size) # 0 is not a worker

    #assumes axes all start at same place
    ax1=np.linspace(20,30, num=gran)
    arr=axis_fitter(arr, dim, gran)

    #fitting axes and initializing queues
    arr=axis_fitter(arr, dim, gran, start=20, stop=30)
    queue,queueposs,queuedone =queue_init(arr)

    #running first updater
    queue,queueposs,queuedone=queue_update(queue,queueposs,queuedone,arr,dim)

    def sender(queue):
        send_num=min(len(queue),len(nodes_avail))
        for k in range(send_num):
            node=nodes_avail.pop()
            queue_elem=queue.pop(k)
            command_buffer[0]=int(all_finished)
            command_buffer[1]=queue_elem
            command_buffer[2]=objectivefuncprimer(arr,queue_elem,dim)
            comm.Send(command_buffer, dest=node)

    while all_finished==False:
        sender(queue)
        comm.Recv(result_buffer,source=MPI.ANY_SOURCE)
        arr,comp_idx=arrupdater(result_buffer[1],result_buffer[2])
        queue,queueposs,queuedone=queue_update(queue,queueposs,queuedone,arr,dim,comp_idx)
        nodes_avail.append(result_buffer[0])
        if len(queuedone)==gran**2:
            for n in range(1, size):
                comm.Send(np.array([True,0,0]), dest=n)
            all_finished=True
    print arr

if rank>0:
    all_finished_worker=False

    #this test function will only work in 2d
    def objectivefunc2d_2(inputs):
        #this will be important for more complicated functions later
        #backnum=(2**dim)-1
        val=sum(inputs)
        return val

    while all_finished_worker==False:
        comm.Recv(command_buffer, source=0)
        all_finished_worker=bool(command_buffer[0])
        if all_finished_worker==False:
            result=objectivefunc2d_2(command_buffer[2])
            # print str(result) +" from "+str(rank)
            result_buffer=np.array([rank,result,command_buffer[1]])
            comm.Send(result_buffer, dest=0)
This code works and has the same basic structure as the code above, but on a much, much simpler example.
from __future__ import division
import numpy as np
import os
from itertools import chain, combinations

from mpi4py import MPI
from mpi4py.MPI import ANY_SOURCE
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

command_buffer = np.zeros(2) # first entry is boolean, rest is data
result_buffer=np.zeros(2) # first position is node, rest is data

if rank==0:
    all_finished=False
    nodes_avail=range(1, size) # 0 is not a worker
    arr=[]
    q=range(20)

    def primer(q):
        return int(all_finished),q

    def sender(q):
        send_num=min(len(q),len(nodes_avail))
        for k in range(send_num):
            node=nodes_avail.pop()
            queue_init=q.pop()
            command_buffer[0]=primer(queue_init)[0]
            command_buffer[1]=primer(queue_init)[1]
            comm.Send(command_buffer, dest=node)

    while all_finished==False:
        sender(q)
        # update q
        comm.Recv(result_buffer,source=MPI.ANY_SOURCE)
        arr.append(result_buffer[1])
        nodes_avail.append(result_buffer[0])
        if len(arr)==20:
            for n in range(1, size):
                comm.Send(np.array([True,0]), dest=n)
            all_finished=True
    print arr

if rank>0:
    all_finished_worker=False
    while all_finished_worker==False:
        comm.Recv(command_buffer, source=0)
        all_finished_worker=bool(command_buffer[0])
        if all_finished_worker==False:
            result=command_buffer[1]*2
            # print str(result) +" from "+str(rank)
            result_buffer=np.array([rank,result])
            comm.Send(result_buffer, dest=0)
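One observation, grounded in the traceback rather than a confirmed diagnosis: the failing script imports pandas, which pulls in numexpr (hence the numexpr/cpuinfo.py warning above), and numexpr starts a worker-thread pool sized to the machine's core count in every process. Ten MPI ranks on a 32-core box can therefore request hundreds of threads at once, and pthread_create() returning 11 (EAGAIN, "Resource temporarily unavailable") is what that exhaustion looks like; the simpler script imports only numpy and mpi4py, so it never spawns those pools. A hedged mitigation sketch, capping the pools via the libraries' standard environment variables before the heavy imports:

# Hypothetical mitigation: cap per-process thread pools before importing
# numpy/pandas so each MPI rank spawns at most one worker thread.
import os
os.environ["OMP_NUM_THREADS"] = "1"      # OpenMP-based pools (e.g. BLAS)
os.environ["MKL_NUM_THREADS"] = "1"      # Intel MKL, if numpy links against it
os.environ["NUMEXPR_NUM_THREADS"] = "1"  # numexpr's own worker pool

import numpy as np    # imported only after the caps are in place
import pandas as pd
from mpi4py import MPI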
