I am trying to understand why I must store the output of a Python function in a variable (regardless of the variable's name, and regardless of whether I subsequently use it). I think this is a general Python issue rather than something specific to the NEURON software, so I am asking here on Stack Overflow.
The line of interest is here:
clamp_output = attach_current_clamp(cell)
If I just write attach_current_clamp(cell), without storing the output of the function in a variable, the code does not work (the plot is empty), and yet I don't use clamp_output at all. Why can't I just call the function? Why must I store the output in a variable even if I never use it?
import sys
import numpy
sys.path.append('/Applications/NEURON-7.4/nrn/lib/python')
from neuron import h, gui
from matplotlib import pyplot
# SET UP CELL
class SingleCell(object):
    def __init__(self):
        self.soma = h.Section(name='soma', cell=self)
        self.soma.L = self.soma.diam = 12.6517
        self.all = h.SectionList()
        self.all.wholetree(sec=self.soma)
        self.soma.insert('pas')
        self.soma.e_pas = -65
        for sec in self.all:
            sec.cm = 20
# CURRENT CLAMP
def attach_current_clamp(cell):
    stim = h.IClamp(cell.soma(1))
    stim.delay = 100
    stim.dur = 300
    stim.amp = 0.2
    return stim
cell = SingleCell()
#IF I CALL THIS FUNCTION WITHOUT STORING THE OUTPUT, THEN IT DOES NOT WORK
clamp_output = attach_current_clamp(cell)
#RECORD AND PLOT
soma_v_vec = h.Vector()
t_vec = h.Vector()
soma_v_vec.record(cell.soma(0.5)._ref_v)
t_vec.record(h._ref_t)
h.tstop = 800
h.run()
pyplot.figure(figsize=(8,4))
soma_plot = pyplot.plot(t_vec,soma_v_vec)
pyplot.show()
This is a NEURON+Python specific bug/feature. It has to do with Python garbage collection and the way NEURON implements the Python-HOC interface.
When there are no more references to a NEURON object (e.g. the IClamp) from within Python or HOC, the object is removed from NEURON.
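In other words (a schematic sketch of the failure mode, not literal code from the question):

attach_current_clamp(cell)                  # returned IClamp gets no reference: it is
                                            # garbage-collected and removed from NEURON
clamp_output = attach_current_clamp(cell)   # a reference is held, so the clamp survives h.run()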
Saving the IClamp as a property of the cell averts the problem in the same way as saving the result, so that could be an option for you:
# In __init__:
    self.IClamps = []

# In attach_current_clamp:
    stim.amp = 0.2
    cell.IClamps.append(stim)
    # return stim
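Put together, a minimal sketch of that approach (reusing the names from the question): the clamp is kept alive as an attribute of the cell, so nothing needs to be stored at the call site.

class SingleCell(object):
    def __init__(self):
        self.soma = h.Section(name='soma', cell=self)
        # ... same setup as above ...
        self.IClamps = []  # holds references so NEURON keeps the clamps alive

def attach_current_clamp(cell):
    stim = h.IClamp(cell.soma(1))
    stim.delay = 100
    stim.dur = 300
    stim.amp = 0.2
    cell.IClamps.append(stim)  # the reference now lives as long as the cell does

cell = SingleCell()
attach_current_clamp(cell)  # no need to store the return value any more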
I'm writing a script in Python using Open Cascade Technology (via the pyOCCT package for Anaconda) to import STEP files, defeature them procedurally and re-export them. I want to preserve the product hierarchy, names and colours as much as possible. Currently the script can import STEP files, simplify all of the geometry while roughly preserving the hierarchy, and re-export the STEP file. The problem is that, no matter how I approach it, I can't make the script preserve the colours of the STEP file in a few particular cases.
Here's the model I pass in to the script:
And here's the result of the simplification:
In this case the simplification has worked correctly, but the colours of some of the bodies were not preserved. The common thread is that the bodies that lose their colours are children of products which only have other bodies as their children (i.e. they don't contain sub-products).
This seems to be related to the way that Open Cascade translates STEP files on import.
Alright, now for some code:
from OCCT.STEPControl import STEPControl_Reader, STEPControl_Writer, STEPControl_AsIs
from OCCT.BRepAlgoAPI import BRepAlgoAPI_Defeaturing
from OCCT.TopAbs import TopAbs_FACE, TopAbs_SHAPE, TopAbs_COMPOUND
from OCCT.TopExp import TopExp_Explorer
from OCCT.ShapeFix import ShapeFix_Shape
from OCCT.GProp import GProp_GProps
from OCCT.BRepGProp import BRepGProp
from OCCT.TopoDS import TopoDS
from OCCT.TopTools import TopTools_ListOfShape
from OCCT.BRep import BRep_Tool
from OCCT.Quantity import Quantity_ColorRGBA
from OCCT.ShapeBuild import ShapeBuild_ReShape
from OCCT.STEPCAFControl import STEPCAFControl_Reader, STEPCAFControl_Writer
from OCCT.XCAFApp import XCAFApp_Application
from OCCT.XCAFDoc import XCAFDoc_DocumentTool, XCAFDoc_ColorGen, XCAFDoc_ColorSurf
from OCCT.XmlXCAFDrivers import XmlXCAFDrivers
from OCCT.TCollection import TCollection_ExtendedString
from OCCT.TDF import TDF_LabelSequence
from OCCT.TDataStd import TDataStd_Name
from OCCT.TDocStd import TDocStd_Document
from OCCT.TNaming import TNaming_NamedShape
from OCCT.Interface import Interface_Static
# DBG
def export_step(shape, path):
    writer = STEPControl_Writer()
    writer.Transfer(shape, STEPControl_AsIs)
    writer.Write(path)
# DBG
def print_shape_type(label, shapeTool):
    if shapeTool.IsFree_(label):
        print("Free")
    if shapeTool.IsShape_(label):
        print("Shape")
    if shapeTool.IsSimpleShape_(label):
        print("SimpleShape")
    if shapeTool.IsReference_(label):
        print("Reference")
    if shapeTool.IsAssembly_(label):
        print("Assembly")
    if shapeTool.IsComponent_(label):
        print("Component")
    if shapeTool.IsCompound_(label):
        print("Compound")
    if shapeTool.IsSubShape_(label):
        print("SubShape")
# Returns a ListOfShape containing the faces to be removed in the defeaturing.
# NOTE: For conciseness I've simplified this algorithm, and as such it *MAY* not produce exactly
# the same output as shown in the screenshots, but it should still do SOME simplification.
def select_faces(shape):
    exp = TopExp_Explorer(shape, TopAbs_FACE)
    selection = TopTools_ListOfShape()
    nfaces = 0
    while exp.More():
        rgb = None
        s = exp.Current()
        exp.Next()
        nfaces += 1
        face = TopoDS.Face_(s)
        gprops = GProp_GProps()
        BRepGProp.SurfaceProperties_(face, gprops)
        area = gprops.Mass()
        surf = BRep_Tool.Surface_(face)
        if area < 150:
            selection.Append(face)
            #log(f"\t\tRemoving face with area: {area}")
    return selection, nfaces
# Performs the defeaturing
def simplify(shape):
    defeaturer = BRepAlgoAPI_Defeaturing()
    defeaturer.SetShape(shape)
    sel = select_faces(shape)
    if sel[0].Extent() == 0:
        return shape
    defeaturer.AddFacesToRemove(sel[0])
    defeaturer.SetRunParallel(True)
    defeaturer.SetToFillHistory(False)
    defeaturer.Build()
    if not defeaturer.IsDone():
        return shape  # TODO: Handle errors
    return defeaturer.Shape()
# Given the label of an entity, finds its displayed colour. If the entity has no defined
# colour, the parents are searched for defined colours as well.
def find_color(label, colorTool):
    col = Quantity_ColorRGBA()
    status = False
    while not status and label != None:
        try:
            status = colorTool.GetColor(label, XCAFDoc_ColorSurf, col)
        except:
            break
        label = label.Father()
    return (col.GetRGB().Red(), col.GetRGB().Green(), col.GetRGB().Blue(), col.Alpha(), status, col)
# Finds all child shapes and simplifies them recursively. Returns true if there were any subshapes.
# For now this assumes all shapes passed into this are translated as "SimpleShape".
# "Assembly" entities should be skipped as we don't need to touch them; "Compound" entities should
# work with this as well, though the behaviour is untested.
# Use the print_shape_type(shapeLabel, shapeTool) method to identify a shape.
def simplify_subshapes(shapeLabel, shapeTool, colorTool, set_colours=None):
    labels = TDF_LabelSequence()
    shapeTool.GetSubShapes_(shapeLabel, labels)
    #print_shape_type(shapeLabel, shapeTool)
    #print(f"{shapeTool.GetShape_(shapeLabel).ShapeType()}")
    cols = {}
    for i in range(1, labels.Length()+1):
        label = labels.Value(i)
        currShape = shapeTool.GetShape_(label)
        print(f"\t{currShape.ShapeType()}")
        if currShape.ShapeType() == TopAbs_COMPOUND:
            # This code path should never be taken as far as I understand
            simplify_subshapes(label, shapeTool, colorTool, set_colours)
        else:
            ''' See the comment at the bottom of the main loop for an explanation of the function of this block
            col = find_color(label, colorTool)
            #print(f"{name} RGBA: {col[0]:.5f} {col[1]:.5f} {col[2]:.5f} {col[3]:.5f} defined={col[4]}")
            cols[label.Tag()] = col
            if set_colours != None:
                colorTool.SetColor(label, set_colours[label.Tag()][5], XCAFDoc_ColorSurf)'''
            # Doing both of these things seems to result in colours being reset but the geometry doesn't get replaced
            nshape = simplify(currShape)
            shapeTool.SetShape(label, nshape)  # This doesn't work
    return labels.Length() > 0, cols
# Set up XCaf Document
app = XCAFApp_Application.GetApplication_()
fmt = TCollection_ExtendedString('MDTV-XCAF')
doc = TDocStd_Document(fmt)
app.InitDocument(doc)
shapeTool = XCAFDoc_DocumentTool.ShapeTool_(doc.Main())
colorTool = XCAFDoc_DocumentTool.ColorTool_(doc.Main())
# Import the step file
reader = STEPCAFControl_Reader()
reader.SetNameMode(True)
reader.SetColorMode(True)
Interface_Static.SetIVal_("read.stepcaf.subshapes.name", 1) # Tells the importer to import subshape names
reader.ReadFile("testcolours.step")
reader.Transfer(doc)
labels = TDF_LabelSequence()
shapeTool.GetShapes(labels)
# Simplify each shape that was imported
for i in range(1, labels.Length()+1):
    label = labels.Value(i)
    shape = shapeTool.GetShape_(label)
    # Assemblies are just made of other shapes, so we'll skip this and simplify them individually...
    if shapeTool.IsAssembly_(label):
        continue
    # This function call here is meant to be the fix for the bug described.
    # The idea was to check if the TopoDS_Shape we're looking at is a COMPOUND, and if so, simplify
    # and call SetShape() on each of the sub-shapes instead, in an attempt to preserve the colours
    # stored in the sub-shapes' labels.
    #status, loadedCols = simplify_subshapes(label, shapeTool, colorTool)
    #if status:
    #    continue
    shape = simplify(shape)
    shapeTool.SetShape(label, shape)
    # The code gets a bit messy here because this was another attempt at fixing the problem by
    # building a dictionary of colours before the shapes were simplified and then resetting the
    # colours of each subshape after simplification. This didn't work either.
    # The idea was to call this function once to generate the dictionary, then simplify, then call
    # it again passing in the dictionary so it could be re-applied.
    #if status:
    #    simplify_subshapes(label, shapeTool, colorTool, loadedCols)
shapeTool.UpdateAssemblies()
# Re-export
writer = STEPCAFControl_Writer()
Interface_Static.SetIVal_("write.step.assembly", 2)
Interface_Static.SetIVal_("write.stepcaf.subshapes.name", 1)
writer.Transfer(doc, STEPControl_AsIs)
writer.Write("testcolours-simplified.step")
There's a lot of stuff here for a minimal reproducible example, but the general flow of the program is that we import the STEP file:
reader.ReadFile("testcolours.step")
reader.Transfer(doc)
Then we iterate through each label in the file (essentially every node in the tree):
labels = TDF_LabelSequence()
shapeTool.GetShapes(labels)
# Simplify each shape that was imported
for i in range(1, labels.Length()+1):
    label = labels.Value(i)
    shape = shapeTool.GetShape_(label)
We skip any labels marked as assemblies, since they contain children and we only want to simplify individual bodies. We then call simplify(shape), which performs the simplification and returns a new shape, and finally call shapeTool.SetShape() to bind the new shape to the old label.
The thing that doesn't work here is that, as explained, Component3 and Component4 don't get marked as Assemblies; they are treated as SimpleShapes, and when each is simplified as one shape, its colours are lost.
One solution I attempted was to call a method simplify_subshapes() which would iterate through each of the subshapes and do the same thing as the main loop, simplifying them and then calling SetShape(). This ended up being even worse, as it resulted in those bodies not being simplified at all, yet still losing their colours.
I also attempted to use the simplify_subshapes() method to make a dictionary of all the colours of the subshapes, then simplify the COMPOUND shape and then call the same method again to this time re-apply the colours to the subshapes using the dictionary (the code for this is commented out with an explanation as to what it did).
col = find_color(label, colorTool)
#print(f"{name} RGBA: {col[0]:.5f} {col[1]:.5f} {col[2]:.5f} {col[3]:.5f} defined={col[4]}")
cols[label.Tag()] = col
if set_colours != None:
    colorTool.SetColor(label, set_colours[label.Tag()][5], XCAFDoc_ColorSurf)
As far as I can see, the issue could be resolved either by getting Open Cascade to import Component3 and Component4 as Assemblies, OR by finding a way to make SetShape() work as intended on subshapes.
Here's a link to the test file:
testcolours.step
I have implemented an Evolutionary Algorithm process in Python 3.8, and am attempting to optimise/reduce its runtime. Due to the heavy constraints upon valid solutions, it can take a few minutes to generate valid chromosomes. To avoid spending hours just generating the initial population, I want to use multiprocessing to generate multiple at a time.
My code at this point in time is:
populationCount = 500
def readDistanceMatrix():
    # code removed

def generateAvailableValues():
    # code removed

def generateAvailableValuesPerColumn():
    # code removed

def generateScheduleTemplate():
    # code removed

def generateChromosome():
    # code removed

if __name__ == '__main__':
    # Data type = DataFrame
    distanceMatrix = readDistanceMatrix()
    # Data type = List of Integers
    availableValues = generateAvailableValues()
    # Data type = List containing Lists of Integers
    availableValuesPerColumn = generateAvailableValuesPerColumn(availableValues)
    # Data type = DataFrame
    scheduleTemplate = generateScheduleTemplate(distanceMatrix)
    # Data type = List containing custom class (with Integer and DataFrame)
    population = []
    while len(population) < populationCount:
        chrmSolution = generateChromosome(availableValuesPerColumn, scheduleTemplate, distanceMatrix)
        population.append(chrmSolution)
Where the population list is filled in with the while loop at the end. I would like to replace the while loop with a Multiprocessing solution that can use up to a pre-set number of cores. For example:
population = []
availableCores = 6
while len(population) < populationCount:
    while usedCores < availableCores:
        # start generating another chromosome as 'chrmSolution'
        population.append(chrmSolution)
However, after reading and watching hours worth of tutorials, I'm unable to get a loop up-and-running. How should I go about doing this?
It sounds like a simple multiprocessing.Pool should do the trick, or at least be a place to start. Here's a simple example of how that might look:
from multiprocessing import Pool, cpu_count

child_globals = {}  # mutable object at the `module` level acts as container for globals (constants)

# these functions must live at module level so worker processes can import them
def init_child(availableValuesPerColumn, scheduleTemplate, distanceMatrix):
    # passing variables to the child process every time is inefficient if they're
    # constant, so instead pass them to the initialization function, and let
    # each child re-use them each time generateChromosome is called
    child_globals['availableValuesPerColumn'] = availableValuesPerColumn
    child_globals['scheduleTemplate'] = scheduleTemplate
    child_globals['distanceMatrix'] = distanceMatrix

def child_work(i):
    # child_work simply wraps generateChromosome with inputs, and throws out the dummy `i` from `range()`
    return generateChromosome(child_globals['availableValuesPerColumn'],
                              child_globals['scheduleTemplate'],
                              child_globals['distanceMatrix'])

if __name__ == '__main__':
    # ... (build availableValuesPerColumn, scheduleTemplate, distanceMatrix as before)
    with Pool(cpu_count(),
              initializer=init_child,  # init function to stuff some constants into the child's global context
              initargs=(availableValuesPerColumn, scheduleTemplate, distanceMatrix)) as p:
        # imap_unordered doesn't make child processes wait to ensure order is preserved,
        # so it keeps the cpu busy more often. It returns a generator, so we use list()
        # to store the results into a list.
        population = list(p.imap_unordered(child_work, range(populationCount)))
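If you want to cap the number of worker processes at a preset count, as in your pseudocode, pass that number instead of cpu_count(), e.g. Pool(availableCores, initializer=init_child, initargs=...).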
I'm trying to implement a GNURadio source block in Python, which has to produce a vector of a fixed size at each call of the [general_] work function.
As a first toy-example I tried to output just a vector of constant values which should change at each call of the [general_] work function.
import numpy
import sys
from gnuradio import gr

class my_source_vf(gr.sync_block):
    """
    docstring for block
    """
    def __init__(self, v_size):
        self.v_size = v_size
        self.mult = 1
        self.buff = numpy.ones(v_size)
        gr.sync_block.__init__(self,
                               name="my_source_vf",
                               in_sig=None,
                               #out_sig=[numpy.float32])
                               out_sig=[(numpy.float32, self.v_size)])

    def work(self, input_items, output_items):
        # <+signal processing here+>
        print(len(output_items))
        out = output_items[0]
        out[0][:] = self.buff * self.mult
        self.mult = self.mult + 1
        return self.v_size
However, when I connect it to a QT GUI Vector Sink block I just see oscillations between 0 and 1, which leads me to think the [general_] work function is called just once.
You mustn't return v_size – that's the length of one item, but you should return the number of items you've produced this call.
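For example, a minimal sketch of a corrected work() for the block above (assuming you produce exactly one vector item per call):

def work(self, input_items, output_items):
    out = output_items[0]              # shape is (noutput_items, v_size)
    out[0][:] = self.buff * self.mult  # fill exactly one output vector
    self.mult += 1
    return 1  # one item produced: a single vector of v_size floats, not v_size items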
OK, full disclosure: I'm not doing a grid search, but the minimal example I could find for what I'm about to ask can (with a grain of salt) be reduced to a grid search (in case you're wondering why I'm not mentioning numpy and friends).
I'm doing a grid search in Python where one axis is discrete and the other one continuous, but for simplicity, let's say I have the following:
x_axis = ['linear', 'quadratic', 'cubic']
y_axis = range(1, 100) # simplification
The evaluation of my function depends on both axes and an intermediate log structure, so in an effort to have no global data I'm defining my function as a closure:
def get_function(xval, *args):
    """ creates the closure that encapsulates thread-local data
    """
    log = {}  # initialization depends on args
    def fun(yval):
        """ evaluation dedicated to a single x_axis value
        """
        if yval in log:
            # in a proper grid search I wouldn't check twice,
            # but this is just to show that log is used and
            # amended inside fun()
            pass
        else:
            log[yval] = 0
        return very_time_consuming_fun(xval, yval, log)
    return fun
So the script uses this set up to run a grid search:
def func_eval(fun):
    for yval in y_axis:
        fun(yval)

# the loop I want to parallelize
for xval in x_axis:
    fun = get_function(xval, args)  # args are computed based on xval
    func_eval(fun)  # can I do result = func_eval(fun) ?
The things I want to ask are:
Am I correct in assuming that log works with a different instance for each x_axis value?
What is the best way to parallelize the last for loop? (If synchronization is needed for the log instances, please elaborate.) Again, I only want the evaluation for each x_axis value on its own thread / core / you name it (best practices are welcome).
Is there a way to get results out of each func_eval, i.e. could it still be parallelized if I had the following:
out = func_eval(fun)
I hate the silence ...
This is what I'm doing for now (so at least I'll get downvoted if it ain't right)
import multiprocessing
pool = multiprocessing.Pool()
funcs = [get_function(xval, args) for xval in x_axis]
outputs = pool.map(func_eval, funcs)
this should be good enough (according to what I've read), but I get the following error:
PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed
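That error occurs because the standard pickler can't serialize closures, and multiprocessing pickles everything it sends to worker processes. One way around it is to build the closure inside the worker, so only xval and the results cross the process boundary; a sketch, assuming get_function and y_axis are defined at module level (args omitted for brevity):

import multiprocessing

def eval_xval(xval):
    # construct the closure in the worker process itself,
    # so the function object never needs to be pickled
    fun = get_function(xval)
    return [fun(yval) for yval in y_axis]

if __name__ == '__main__':
    pool = multiprocessing.Pool()
    outputs = pool.map(eval_xval, x_axis)  # one result list per x_axis value
    pool.close()
    pool.join()

This also means each xval gets its own log instance, created inside its own worker.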
Minimal Question:
def smooth(indicator, aggregation, tick):
    storage.ZZZ = []
    storage.ZZZZ = []
is the pertinent part of my definition; when I call that definition I'm using:
MA_now_smooth = smooth(MA, IN, I)[-1]
where MA is an input array, and IN and I are constants; the definition is given in full below, but it ultimately returns storage.ZZZZ (I take its last element). What I want is to create custom storage objects that are named according to the "indicator" input, so that the persistent variables don't overlap when calling this definition for myriad array inputs.
i.e.
smooth(MA, IN, I)[-1]
should create:
storage.ZZZ_MA
storage.ZZZZ_MA
but
smooth(MA2, IN, I)[-1]
should create:
storage.ZZZ_MA2
storage.ZZZZ_MA2
In Depth Question:
I'm creating a Simple Moving Average smoothing definition for TA-lib indicators at tradewave.net. TA-lib is a library of black-box functions that produce "Financial Technical Analysis" array outputs for things like "moving average", "exponential moving average", "stochastic", etc. My definition is a secondary simple smoothing of these TA-lib functions.
I'm having to do this because when "aggregating" candles counting backwards from current, I'm getting "wiggly" outputs; you can read more about that here if you need background: https://discuss.tradewave.net/t/aggregating-candles-some-thoughts
My definition code works well to create a list of smoothed values when smoothing a single indicator "MA"; a TA-lib array:
import talib

def smooth(indicator, aggregation, tick):
    import math
    A = int(math.ceil(aggregation/tick))
    if info.tick == 0:
        storage.ZZZ = []
        storage.ZZZZ = []
    storage.ZZZ.append(indicator[-1])
    storage.ZZZ = storage.ZZZ[-A:]
    ZZZ = sum(storage.ZZZ) / len(storage.ZZZ)
    storage.ZZZZ.append(ZZZ)
    storage.ZZZZ = storage.ZZZZ[-250:]
    return storage.ZZZZ

def tick():
    I = info.interval
    period = 10
    IN = 3600
    instrument = pairs.btc_usd
    C = data(interval=IN)[instrument].warmup_period('close')
    MA = talib.MA(C, timeperiod=period, matype=0)
    MA_now = MA[-1]
    MA_now_smooth = smooth(MA, IN, I)[-1]
    plot('MA', MA_now)
    plot('MA_smooth', MA_now_smooth)
However, when I attempt to smooth more than one indicator with the same definition, it fails because the persistent variables in the definition are the same for both MA and MA2. This does not work:
import talib

def smooth(indicator, aggregation, tick):
    import math
    A = int(math.ceil(aggregation/tick))
    if info.tick == 0:
        storage.ZZZ = []
        storage.ZZZZ = []
    storage.ZZZ.append(indicator[-1])
    storage.ZZZ = storage.ZZZ[-A:]
    ZZZ = sum(storage.ZZZ) / len(storage.ZZZ)
    storage.ZZZZ.append(ZZZ)
    storage.ZZZZ = storage.ZZZZ[-250:]
    return storage.ZZZZ

def tick():
    I = info.interval
    period = 10
    IN = 3600
    instrument = pairs.btc_usd
    C = data(interval=IN)[instrument].warmup_period('close')
    MA = talib.MA(C, timeperiod=period, matype=0)
    MA2 = talib.MA(C, timeperiod=2*period, matype=0)
    MA_now = MA[-1]
    MA2_now = MA2[-1]
    MA_now_smooth = smooth(MA, IN, I)[-1]
    MA2_now_smooth = smooth(MA2, IN, I)[-1]
    plot('MA', MA_now)
    plot('MA2', MA2_now)
    plot('MA_smooth', MA_now_smooth)
    plot('MA2_smooth', MA2_now_smooth)
What I would like to do... and don't understand how to do:
I'd like the definition to create a new persistent storage object for each new input, and I'd like the names of those objects to be derived from the name of the "indicator" input, i.e.:
storage.ZZZ_MA
storage.ZZZZ_MA
ZZZ_MA
for the "MA" smoothing and
storage.ZZZ_MA2
storage.ZZZZ_MA2
ZZZ_MA2
for "MA2" smoothing
I would like to be able to reuse this definition with many different array inputs for "indicator" and for each instance use the name of the indicator array appended to the persistent object names used in the definition. For example:
storage.ZZZ_MA3
storage.ZZZ_MA4
etc.
In the instances below, info.interval is my tick size of 15 minutes (900 sec) and my aggregation was 1 hour (3600 sec).
With the single output of "MA" and correct smoothing
With dual outputs of "MA" and "MA2" I'm getting incorrect smoothing
In the second image I'm looking for two "smooth" lines, one in the middle of the wiggly red plot and the other in the middle of the wiggly blue plot. Instead I'm getting two identical wiggly lines (purple & orange) that split the difference. I understand why, but I don't know how to fix it.
1) please show me how
2) please tell me what the thing I'm trying to do is "called", and point me to some tags/posts where I can learn more.
Thanks for your help!
LP
Make storage a dict, and use string keys rather than trying to create and access dynamic variables?
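A minimal sketch of that idea with the question's names (the interim solution below fleshes it out):

key = 'ZZZ_%s' % 'MA'    # build a per-indicator string key
if info.tick == 0:
    storage[key] = []    # each key gets its own independent persistent list
storage[key].append(MA[-1])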
Well, I've arrived at an interim solution.
While I like this solution, as it's doing everything I need, I would like to eliminate the redundant "label" input. Is there any way for me to reference the name of the object passed in as "indicator", rather than the object itself, so that I could return to my original 3 input parameters rather than 4?
I tried this:
def smooth(indicator, aggregation, tick):
    import math
    A = int(math.ceil(aggregation/tick))
    ZZZ = 'ZZZ_%s' % dict([(t.__name__, t) for t in indicator])
    ZZZZ = 'ZZZZ_%s' % dict([(t.__name__, t) for t in indicator])
    if info.tick == 0:
        storage[ZZZ] = []
        storage[ZZZZ] = []
    storage[ZZZ].append(indicator[-1])
    storage[ZZZ] = storage[ZZZ][-A:]
    ZZZZZ = sum(storage[ZZZ]) / len(storage[ZZZ])
    storage[ZZZZ].append(ZZZZZ)
    storage[ZZZZ] = storage[ZZZZ][-250:]
    return storage[ZZZZ]
but I get:
File "", line 259, in File "", line 31, in tick File "", line 6, in smooth AttributeError: 'numpy.float64' object has no attribute 'name'
Here is my current 4-argument definition, smoothing 4 different TA-lib moving averages. This same definition can be used with many other aggregated TA-lib indicators. It should work with ANY aggregate/tick size ratio, including 1:1.
import talib

def smooth(indicator, aggregation, tick, label):
    import math
    A = int(math.ceil(aggregation/tick))
    ZZZ = 'ZZZ_%s' % label
    ZZZZ = 'ZZZZ_%s' % label
    if info.tick == 0:
        storage[ZZZ] = []
        storage[ZZZZ] = []
    storage[ZZZ].append(indicator[-1])
    storage[ZZZ] = storage[ZZZ][-A:]
    ZZZZZ = sum(storage[ZZZ]) / len(storage[ZZZ])
    storage[ZZZZ].append(ZZZZZ)
    storage[ZZZZ] = storage[ZZZZ][-250:]
    return storage[ZZZZ]

def tick():
    I = info.interval
    period = 10
    IN = 3600
    instrument = pairs.btc_usd
    C = data(interval=IN)[instrument].warmup_period('close')
    MA1 = talib.MA(C, timeperiod=period, matype=0)
    MA2 = talib.MA(C, timeperiod=2*period, matype=0)
    MA3 = talib.MA(C, timeperiod=3*period, matype=0)
    MA4 = talib.MA(C, timeperiod=4*period, matype=0)
    MA1_now = MA1[-1]
    MA2_now = MA2[-1]
    MA3_now = MA3[-1]
    MA4_now = MA4[-1]
    MA1_now_smooth = smooth(MA1, IN, I, 'MA1')[-1]
    MA2_now_smooth = smooth(MA2, IN, I, 'MA2')[-1]
    MA3_now_smooth = smooth(MA3, IN, I, 'MA3')[-1]
    MA4_now_smooth = smooth(MA4, IN, I, 'MA4')[-1]
    plot('MA1', MA1_now)
    plot('MA2', MA2_now)
    plot('MA3', MA3_now)
    plot('MA4', MA4_now)
    plot('MA1_smooth', MA1_now_smooth)
    plot('MA2_smooth', MA2_now_smooth)
    plot('MA3_smooth', MA3_now_smooth)
    plot('MA4_smooth', MA4_now_smooth)
h/t james for collaboration