Is it possible to create a Mayavi visualization that is updated on a timed basis rather than through Trait events?
I have a visualization that I need to update continually, but the data I am updating comes from an external source (i.e. not from a user-input event in the graphical interface).
In the meantime, I need to be running a separate thread, so the Mayavi visualization can't control the main loop.
Can this be done? And if so, how?
Any help would be very greatly appreciated.
Some dummy code for how I'm trying to tackle this is below:
import numpy
from mayavi.sources.array_source import ArraySource
from pyface.api import GUI
from mayavi.modules.api import Surface
from mayavi.api import Engine
import threading
import time


# Class runs a given function on a given thread at a given scan time
class TimedThread(threading.Thread):
    def __init__(self, thread, scan_time, funct, *funct_args):
        threading.Thread.__init__(self)
        # Thread for the function to operate in
        self.thread = thread
        # Defines the scan time the function is to be run at
        self.scan_time = scan_time
        # Function to be run
        self.run_function = funct
        # Function arguments
        self.funct_args = funct_args

    def run(self):
        while True:
            # Locks the relevant thread
            self.thread.acquire()
            # Begins timer for elapsed time calculation
            start_time = time.time()
            # Runs the function that was passed to the thread
            self.run_function(*self.funct_args)
            # Wakes up relevant threads to listen for the thread release
            self.thread.notify_all()
            # Releases thread
            self.thread.release()
            # Calculates the elapsed process time & sleeps for the remainder of the scan time
            end_time = time.time()
            elapsed_time = end_time - start_time
            sleep_time = self.scan_time - elapsed_time
            if sleep_time > 0:
                time.sleep(sleep_time)
            else:
                print('Process time exceeds scan time')


# Function to update the visualisation
def update_visualisation(source):
    print("Updating Visualization...")
    # Pretend the data is being updated externally
    x = numpy.array([0, numpy.random.rand()])
    y = z = x
    data = [x, y, z]
    source.scalar_data = data
    GUI.invoke_later(source.update)


# Function to run the visualisation
def run_main():
    print('Running Main Controller')


if __name__ == '__main__':
    c = threading.Condition()

    # Create a new Engine for Mayavi and start it
    engine = Engine()
    engine.start()

    # Create a new Scene
    engine.new_scene()

    # Create the data
    x = numpy.linspace(0, 10, 2)
    y = z = x
    data = [x, y, z]

    # Create a new Source, map the data to the source and add it to the Engine
    src = ArraySource()
    src.scalar_data = data
    engine.add_source(src)

    # Create a Module
    surf = Surface()

    # Add the Module to the Engine
    engine.add_module(surf)

    # Create timed thread classes
    visualisation_thread = TimedThread(c, 2.0, update_visualisation, src)
    main_thread = TimedThread(c, 1.0, run_main)

    # Start & join the threads
    main_thread.start()
    visualisation_thread.start()
    main_thread.join()
    visualisation_thread.join()
Found a solution in the following link:
Animating a mayavi points3d plot
Solved by using the @mlab.animate decorator to call the update function, with a yield statement to hand control back to the animator so the scene stays responsive to user manipulation.
Solution below:
import numpy as np
import threading
import time

from mayavi import mlab
from mayavi.api import Engine


# Class runs a given function on a given thread at a given scan time
class SafeTimedThread(threading.Thread):
    def __init__(self, thread_condition, scan_time, funct, *funct_args):
        threading.Thread.__init__(self)
        # Thread condition for the function to operate with
        self.tc = thread_condition
        # Defines the scan time the function is to be run at
        self.scan_time = scan_time
        # Function to be run
        self.run_function = funct
        # Function arguments
        self.funct_args = funct_args

    def run(self):
        while True:
            # Locks the relevant thread
            self.tc.acquire()
            # Begins timer for elapsed time calculation
            start_time = time.time()
            # Runs the function that was passed to the thread
            self.run_function(*self.funct_args)
            # Wakes up relevant threads to listen for the thread release
            self.tc.notify_all()
            # Releases thread
            self.tc.release()
            # Calculates the elapsed process time & sleeps for the remainder of the scan time
            end_time = time.time()
            elapsed_time = end_time - start_time
            sleep_time = self.scan_time - elapsed_time
            if sleep_time > 0:
                time.sleep(sleep_time)
            else:
                print('Process time exceeds scan time')


# Function to run the main controller
def run_main():
    print('Running Main Controller')


def init_vis():
    # Creates a new Engine, starts it and creates a new scene
    engine = Engine()
    engine.start()
    engine.new_scene()

    # Initialise Plot
    data = np.random.random((3, 2))
    x = data[0]
    y = data[1]
    z = data[2]
    drawing = mlab.plot3d(x, y, z, np.ones_like(x))
    return drawing


@mlab.animate(delay=500, ui=False)
def update_visualisation(drawing):
    while True:
        print('Updating Visualisation')
        # Pretend to receive data from external source
        data = np.random.random((3, 2))
        x = data[0]
        y = data[1]
        z = data[2]
        drawing.mlab_source.set(x=x, y=y, z=z)
        yield


if __name__ == '__main__':
    # Create Condition for Safe Threading
    c = threading.Condition()

    # Create display window
    dwg = init_vis()

    # Create safe timed thread for main thread and start
    main_thread = SafeTimedThread(c, 1.0, run_main).start()

    # Update using mlab animator
    vis_thread = update_visualisation(dwg)

    mlab.show()
Related
I have less than 3 months of Python programming under my belt, but basically I have a program that pulls values from the CoinGecko API indefinitely and creates processes so the functions that pull the data can run independently from one another. I'd like for it to break its loop after I press the specified hotkey, which is set to 'q'. Whenever I press the hotkey, nothing happens and the loop just keeps running. I've tried using the keyboard.read_key() function, but that just stops my program from running until I press the q button, which causes the program to run the loop once and then close. I have no idea why the is_pressed() function refuses to work, and I'd like some help from more advanced people.
Piece of code in question:
from multiprocessing.dummy import freeze_support
from pycoingecko import CoinGeckoAPI
import time
from multiprocessing import Process
from multiprocessing import Pool
import multiprocessing
import keyboard as kb
import psutil

cg = CoinGeckoAPI()


class CGCoin:
    def __init__(self, coinname, coinid):
        self.coinname = coinname
        self.coinid = coinid

    def pulldata(self):
        while True:
            wishtoquit = False
            if kb.is_pressed('Q'):
                print('ending after this loop')
                wishtoquit = True
            timestarted = time.asctime()
            self.prices = []
            self.daychanges = []
            self.volumes = []
            self.marketcaps = []
            self.weekchanges = []
            self.highs = []
            self.lows = []
            self.times = []
            print(f'starting {self.coinname} reading at {timestarted}')
            loops = 0
            maxloops = 2
            while loops < maxloops:
                time.sleep(15)
                coin = cg.get_coin_by_id(f'{self.coinid}')
                time.sleep(5)
                coinvalues = coin.get('market_data')
                coinprices = coinvalues.get('current_price')
                coinvolumes = coinvalues.get('total_volume')
                mrktcaps = coinvalues.get('market_cap')
                dayhigh = coinvalues.get('high_24h')
                daylow = coinvalues.get('low_24h')
                daychangepercentage = coinvalues.get('price_change_percentage_24h')
                weekchangepercentage = coinvalues.get('price_change_percentage_7d')
                coinprice = coinprices.get('usd')
                coinvolume = coinvolumes.get('usd')
                coincap = mrktcaps.get('usd')
                coindayhigh = dayhigh.get('usd')
                coindaylow = daylow.get('usd')
                timepulled = time.asctime()
                self.prices.append(coinprice)
                self.daychanges.append(daychangepercentage)
                self.volumes.append(coinvolume)
                self.marketcaps.append(coincap)
                self.weekchanges.append(weekchangepercentage)
                self.highs.append(coindayhigh)
                self.lows.append(coindaylow)
                self.times.append(timepulled)
                loops = loops + 1
                print(loops)
            timeended = time.asctime()
            print(f'stopping {self.coinname} reading at {timeended}')
            if wishtoquit:
                print('ending loops')
                break
            time.sleep(5)


bitcoin = CGCoin('Bitcoin', 'bitcoin')
ethereum = CGCoin('Ethereum', 'ethereum')

if __name__ == '__main__':
    freeze_support()
    btcpul = Process(target=bitcoin.pulldata, name=bitcoin.coinname)
    btcpul.start()
If anyone has any ideas or fully functional workarounds, I'd really like to hear them. I'd be extremely grateful for any help received.
It looks like the PyPI keyboard package needs root permissions on Linux.
You could just do kb.on_press_key("p", lambda _: sys.exit(0)) and let sys.exit(0) end the script.
If you're running this in the terminal you should just be able to press Ctrl+C to interrupt its execution.
ref: How to detect key presses?
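For illustration, a minimal sketch of that callback approach (the flag name and loop body are my own, and the hotkey is 'q' to match the question; this is not a tested drop-in for the program above):

import time
import threading
import keyboard as kb

stop_flag = threading.Event()

# The callback fires on keyboard's own listener thread, so just set a flag there.
kb.on_press_key("q", lambda _: stop_flag.set())

while not stop_flag.is_set():
    # ... pull the CoinGecko data here ...
    time.sleep(1)

print("hotkey pressed, exiting")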
I'm trying to learn how to use threading, and specifically concurrent.futures.ThreadPoolExecutor, because I need to return a numpy.array from a function I want to run concurrently.
The end goal is to have one process running a video loop of an application, while another process does object detection and GUI interactions. The result() method from the concurrent.futures library allows me to do this.
The issue is that my code runs once and then seems to lock up. I'm actually unsure what happens, because when I step through it in the debugger it runs once, then the debugger goes blank; I literally cannot step through, and no error is thrown.
The code appears to lock up on the line: notepadWindow = pygetwindow.getWindowsWithTitle('Notepad')[0]
I get exactly one loop; the print statement prints once, the loop restarts, and then it halts at pygetwindow.
I don't know much about the GIL, but I have tried using the max_workers=1 argument on ThreadPoolExecutor(), which doesn't make a difference either way, and I was under the impression that concurrent.futures allows me to bypass the lock.
How do I run videoLoop as a single thread making sure to return DetectionWindow every iteration?
import cv2 as cv
import numpy as np
import concurrent.futures
from PIL import ImageGrab
import pygetwindow


def videoLoop():
    notepadWindow = pygetwindow.getWindowsWithTitle('Notepad')[0]
    x1 = notepadWindow.left
    y1 = notepadWindow.top
    height = notepadWindow.height
    width = notepadWindow.width
    x2 = x1 + width
    y2 = y1 + height
    haystack_img = ImageGrab.grab(bbox=(x1, y1, x2, y2))
    haystack_img_np = np.array(haystack_img)
    DetectionWindow = cv.cvtColor(haystack_img_np, cv.COLOR_BGR2GRAY)
    return DetectionWindow


def f1():
    with concurrent.futures.ThreadPoolExecutor() as executor:
        f1 = executor.submit(videoLoop)
        notepadWindow = f1.result()
        cv.imshow("Video Loop", notepadWindow)
        cv.waitKey(1)
        print(f1.result())


while True:
    f1()
A ThreadPoolExecutor won't help you an awful lot here, if you want a continuous stream of frames.
Here's a reworking of your code that uses a regular old threading.Thread and puts frames (and their capture timestamps, since this is asynchronous) in a queue.Queue you can then read in another (or the main) thread.
The thread has an otherwise infinite loop that can be stopped by setting the thread's exit_signal.
(I didn't test this, since I'm presently on a Mac, so there may be typos or other problems.)
import queue
import time
import threading

import cv2 as cv
import numpy as np
from PIL import ImageGrab
import pygetwindow


def do_capture():
    notepadWindow = pygetwindow.getWindowsWithTitle("Notepad")[0]
    x1 = notepadWindow.left
    y1 = notepadWindow.top
    height = notepadWindow.height
    width = notepadWindow.width
    x2 = x1 + width
    y2 = y1 + height
    haystack_img = ImageGrab.grab(bbox=(x1, y1, x2, y2))
    return cv.cvtColor(np.array(haystack_img), cv.COLOR_BGR2GRAY)


class VideoCaptureThread(threading.Thread):
    def __init__(self, result_queue: queue.Queue) -> None:
        super().__init__()
        self.exit_signal = threading.Event()
        self.result_queue = result_queue

    def run(self) -> None:
        while not self.exit_signal.wait(0.05):
            try:
                result = do_capture()
                self.result_queue.put((time.time(), result))
            except Exception as exc:
                print(f"Failed capture: {exc}")


def process_frames(result_queue: queue.Queue):
    start_time = time.time()
    while time.time() - start_time < 5:  # Run for five seconds
        frame = result_queue.get()
        print(frame)


def main():
    result_queue = queue.Queue()
    thread = VideoCaptureThread(result_queue=result_queue)
    thread.start()
    process_frames(result_queue)
    thread.exit_signal.set()
    thread.join()


if __name__ == "__main__":
    main()
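If you want to display the frames instead of printing them, one possible variation of process_frames (my own sketch, not part of the answer above) shows each queued (timestamp, frame) pair with OpenCV in the main thread:

import queue
import time
import cv2 as cv


def show_frames(result_queue: queue.Queue, duration: float = 5.0):
    # Pull (timestamp, frame) tuples off the queue and display them for `duration` seconds.
    start_time = time.time()
    while time.time() - start_time < duration:
        timestamp, frame = result_queue.get()
        cv.imshow("Video Loop", frame)
        cv.waitKey(1)  # give the OpenCV window a chance to refresh
    cv.destroyAllWindows()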
So I've been thinking about this for a couple of days now and I can't figure it out; I've searched around but couldn't find the answer I was looking for, so any help would be greatly appreciated.
Essentially what I am trying to do is call a method on a group of objects in my main thread from a separate thread, just once after 2 seconds, and then the thread can exit. I'm just using threading as a way of creating a non-blocking 2-second pause (if there are other ways of accomplishing this, please let me know).
So I have a pyqtgraph graph/plot that updates from a websocket stream, and the GUI can only be updated from the thread that starts it (the main one).
What happens is I open a websocket stream, fill up a buffer for about 2 seconds, make a REST request, apply the updates from the buffer to the data from the REST request, and then update the data/plot as new messages come in. Now the issue is I can't figure out how to create a non-blocking 2-second pause in the main thread without creating a child thread. If I create a child thread and pass in the object that contains the dictionary I want to update after 2 seconds, I get issues about updating the plot from a different thread. So what I THINK is happening is that when the new thread is spawned, the reference to the object I want to update is actually the object itself, or the data (dictionary) containing the update data now lives in a different thread than the GUI, and that causes issues.
open websocket --> start filling buffer --> wait 2 seconds --> REST request --> apply updates from buffer to REST data --> update data as new websocket updates/messages come in.
Unfortunately, the websocket and GUI only start when you run pg.exec(), and you can't break them up to start individually; you create them and then start them together (or at least I have failed to find a way to start them individually). Alternatively, I also tried using a separate library to handle websockets, but this requires starting a thread for incoming messages as well.
This is a minimum reproducible example; sorry it's pretty long, but I couldn't really break it down any more without removing required functionality or losing context:
import json
import importlib
from requests.api import get
import functools
import time
import threading

import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore

QtWebSockets = importlib.import_module(pg.Qt.QT_LIB + '.QtWebSockets')


class coin():
    def __init__(self):
        self.orderBook = {'bids': {}, 'asks': {}}
        self.SnapShotRecieved = False
        self.last_uID = 0
        self.ordBookBuff = []

        self.pltwgt = pg.PlotWidget()
        self.pltwgt.show()

        self.bidBar = pg.BarGraphItem(x=[0], height=[1], width=1, brush=(25, 25, 255, 125), pen=(0, 0, 0, 0))
        self.askBar = pg.BarGraphItem(x=[1], height=[1], width=1, brush=(255, 25, 25, 125), pen=(0, 0, 0, 0))
        self.pltwgt.addItem(self.bidBar)
        self.pltwgt.addItem(self.askBar)

    def updateOrderBook(self, message):
        for side in ['a', 'b']:
            bookSide = 'bids' if side == 'b' else 'asks'
            for update in message[side]:
                if float(update[1]) == 0:
                    try:
                        del self.orderBook[bookSide][float(update[0])]
                    except:
                        pass
                else:
                    self.orderBook[bookSide].update({float(update[0]): float(update[1])})
                while len(self.orderBook[bookSide]) > 1000:
                    del self.orderBook[bookSide][(min(self.orderBook['bids'], key=self.orderBook['bids'].get)) if side == 'b' else (max(self.orderBook['asks'], key=self.orderBook['asks'].get))]
        if self.SnapShotRecieved == True:
            self.bidBar.setOpts(x0=self.orderBook['bids'].keys(), height=self.orderBook['bids'].values(), width=1)
            self.askBar.setOpts(x0=self.orderBook['asks'].keys(), height=self.orderBook['asks'].values(), width=1)

    def getOrderBookSnapshot(self):
        orderBookEncoded = get('https://api.binance.com/api/v3/depth?symbol=BTCUSDT&limit=1000')
        if orderBookEncoded.ok:
            rawOrderBook = orderBookEncoded.json()
            orderBook = {'bids': {}, 'asks': {}}
            for orders in rawOrderBook['bids']:
                orderBook['bids'].update({float(orders[0]): float(orders[1])})
            for orders in rawOrderBook['asks']:
                orderBook['asks'].update({float(orders[0]): float(orders[1])})
            last_uID = rawOrderBook['lastUpdateId']
            while self.ordBookBuff[0]['u'] <= last_uID:
                del self.ordBookBuff[0]
                if len(self.ordBookBuff) == 0:
                    break
            if len(self.ordBookBuff) >= 1:
                for eachUpdate in self.ordBookBuff:
                    self.last_uID = eachUpdate['u']
                    self.updateOrderBook(eachUpdate)
                self.ordBookBuff = []
            self.SnapShotRecieved = True
        else:
            print('Error retrieving order book.')  # RESTful request failed


def on_text_message(message, refObj):
    messaged = json.loads(message)
    if refObj.SnapShotRecieved == False:
        refObj.ordBookBuff.append(messaged)
    else:
        refObj.updateOrderBook(messaged)


def delay(myObj):
    time.sleep(2)
    myObj.getOrderBookSnapshot()


def main():
    pg.mkQApp()
    refObj = coin()

    websock = QtWebSockets.QWebSocket()
    websock.connected.connect(lambda: print('connected'))
    websock.disconnected.connect(lambda: print('disconnected'))
    websock.error.connect(lambda e: print('error', e))
    websock.textMessageReceived.connect(functools.partial(on_text_message, refObj=refObj))

    url = QtCore.QUrl("wss://stream.binance.com:9443/ws/btcusdt@depth@1000ms")
    websock.open(url)

    getorderbook = threading.Thread(target=delay, args=(refObj,), daemon=True)  # , args = (lambda : websocketThreadExitFlag,)
    getorderbook.start()

    pg.exec()


if __name__ == "__main__":
    main()
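For what it's worth, a non-blocking delay can also be scheduled on the Qt event loop itself rather than on a thread. A minimal sketch of that idea (my own suggestion, reusing the coin class and imports from the example above):

def main():
    pg.mkQApp()
    refObj = coin()

    # ... set up the QWebSocket exactly as in the example above ...

    # Run getOrderBookSnapshot on the GUI thread after 2000 ms, without
    # blocking the event loop and without spawning a separate thread.
    QtCore.QTimer.singleShot(2000, refObj.getOrderBookSnapshot)

    pg.exec()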
I am designing a new time/score keeper for an air hockey table using a PyBoard as a base. My plan is to use a TM1637 (4x7-segment display) for the time display, a rotary encoder with a button to set the time, IR and a couple of 7-segment displays for scoring, IR reflector sensors for the goal lines, and a relay to control the fan.
I'm getting hung up trying to separate the clock into its own thread while focusing on reading the sensors. I figured I could use uasyncio to split everything up nicely, but I can't figure out where to put the directives to spin off a thread for the clock and, eventually, the sensors.
On execution right now, it appears the rotary encoder is assigned the default value, no timer is started, the encoder doesn't set the time, and the program returns control to the REPL rather quickly.
Prior to trying to async everything, I had the rotary encoder and timer working well. Now, not so much.
from rotary_irq_pyb import RotaryIRQ
from machine import Pin
import pyb  # used below for Pin.board access
import tm1637
import utime
import uasyncio


async def countdown(cntr):
    # just init min/sec to any int > 0
    min = sec = 99
    enableColon = True
    while True:
        # update the 4x7seg with the time remaining
        min = abs(int((cntr - utime.time()) / 60))
        sec = (cntr - utime.time()) % 60
        # print(str(), str(sec), sep=':' )
        enableColon = not enableColon  # alternately blink the colon
        tm.numbers(min, sec, colon=enableColon)
        if (min + sec == 0):  # once both reach zero, break
            break
        await uasyncio.sleep(500)


X1 = pyb.Pin.board.X1
X2 = pyb.Pin.board.X2
Y1 = pyb.Pin.board.Y1
Y2 = pyb.Pin.board.Y2

button = pyb.Pin(pyb.Pin.board.X3, pyb.Pin.IN)

r = RotaryIRQ(pin_num_clk=X1,
              pin_num_dt=X2,
              min_val=3,
              max_val=10,
              reverse=False,
              range_mode=RotaryIRQ.RANGE_BOUNDED)

tm = tm1637.TM1637(clk=Y1, dio=Y2)

val_old = val_new = 0
while True:
    val_new = r.value()
    if (val_old != val_new):
        val_old = val_new
        print(str(val_new))
    if (button.value()):  # save value as minutes
        loop = uasyncio.get_event_loop()
        endTime = utime.time() + (60 * val_new)
        loop.create_task(countdown(endTime))
        r.close()  # Turn off Rotary Encoder
        break

# loop = uasyncio.get_event_loop()
# loop.create_task(countdown(et))
# loop.run_until_complete(countdown(et))
I'm sure it's something simple, but this is the first non-CLI python script I've done, so I'm sure there are a bunch of silly mistakes. Any assistance would be appreciated.
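For reference, a common way to structure this with uasyncio is to make the encoder/button polling a coroutine as well and let uasyncio.run() own the event loop. A rough sketch of that shape follows (my own illustration, reusing the r, button and countdown names from the code above). Note also that uasyncio.sleep() takes seconds, so await uasyncio.sleep(500) pauses for 500 seconds; uasyncio.sleep_ms(500) is probably what was intended.

import utime
import uasyncio


async def wait_for_start():
    # Poll the rotary encoder and button cooperatively instead of in a blocking loop.
    val_old = val_new = 0
    while not button.value():
        val_new = r.value()
        if val_old != val_new:
            val_old = val_new
            print(val_new)
        await uasyncio.sleep_ms(50)  # yield so other tasks can run
    return utime.time() + 60 * r.value()


async def main():
    end_time = await wait_for_start()
    r.close()  # Turn off Rotary Encoder
    # The countdown runs as its own task; sensor tasks could be created here too.
    await uasyncio.create_task(countdown(end_time))


uasyncio.run(main())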
I'm trying to thread the following code and send data to it (at random intervals), but I can't figure out how. I'm saving all the data to a txt file and reading the info from there, and it isn't working very well. Is it possible to create a function that sends data to a specific thread (like SendDataToThread(data, ThreadNumber))? And how would I go about reading the data sent? I've seen a few solutions using a queue but I was unable to understand them. Here is the script I am temporarily using to plot the graph, which I found here. Sorry if the question seems simple, but I've never before messed with threading or matplotlib.
import matplotlib.pyplot as plt
from threading import Thread

plt.ion()


class DynamicUpdate():
    # Suppose we know the x range
    min_x = 0
    max_x = 10

    def on_launch(self):
        # Set up plot
        self.figure, self.ax = plt.subplots()
        self.lines, = self.ax.plot([], [], 'o')
        # Autoscale on unknown axis and known lims on the other
        self.ax.set_autoscaley_on(True)
        self.ax.set_xlim(self.min_x, self.max_x)
        # Other stuff
        self.ax.grid()
        ...

    def on_running(self, xdata, ydata):
        # Update data (with the new _and_ the old points)
        self.lines.set_xdata(xdata)
        self.lines.set_ydata(ydata)
        # Need both of these in order to rescale
        self.ax.relim()
        self.ax.autoscale_view()
        # We need to draw *and* flush
        self.figure.canvas.draw()
        self.figure.canvas.flush_events()

    # Example
    def __call__(self):
        # read/plot data
Here's some example code which shows how to do several of the things that were asked about. This uses multithreading rather than multiprocessing, and shows some examples of using queues, starting/stopping worker threads and updating a matplotlib plot with additional data.
(Part of the code comes from answers to other questions including this one and this one.)
The code shows a possible implementation of an asynchronous worker, to which data can be sent for subsequent processing. The worker uses an internal queue to buffer the data, and an internal thread (loop) that reads data from the queue, does some processing and sends the result for display.
An asynchronous plotter implementation is also shown. Results can be sent to this plotter from multiple workers. (This also uses an internal queue for buffering; this is done to allow the main program thread itself to call the function that updates the plot, which appears to be a requirement with matplotlib.)
NB This was written for Python 2.7 on OSX. Hope some of it may be useful.
import time
import threading
import Queue
import math

import matplotlib.pyplot as plt


class AsynchronousPlotter:
    """
    Updates a matplotlib data plot asynchronously.
    Uses an internal queue to buffer results passed for plotting in x, y pairs.
    NB the output_queued_results() function is intended to be called periodically
    from the main program thread, to update the plot with any waiting results.
    """

    def output_queued_results(self):
        """
        Plots any waiting results. Should be called from main program thread.
        Items for display are x, y pairs
        """
        while not self.queue.empty():
            item = self.queue.get()
            x, y = item
            self.add_point(x, y)
            self.queue.task_done()

    def queue_result_for_output(self, x, y):
        """
        Queues an x, y pair for display. Called from worker threads, so intended
        to be thread safe.
        """
        self.lock.acquire(True)
        self.queue.put([x, y])
        self.lock.release()

    def redraw(self):
        self.ax.relim()
        self.ax.autoscale_view()
        self.fig.canvas.draw()
        plt.pause(0.001)

    def add_point(self, x, y):
        self.xdata.append(x)
        self.ydata.append(y)
        self.lines.set_xdata(self.xdata)
        self.lines.set_ydata(self.ydata)
        self.redraw()

    def __init__(self):
        self.xdata = []
        self.ydata = []
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(111)
        self.lines, = self.ax.plot(self.xdata, self.ydata, 'o')
        self.ax.set_autoscalex_on(True)
        self.ax.set_autoscaley_on(True)
        plt.ion()
        plt.show()
        self.lock = threading.Lock()
        self.queue = Queue.Queue()


class AsynchronousWorker:
    """
    Processes data asynchronously.
    Uses an internal queue and internal thread to handle data passed in.
    Does some processing on the data in the internal thread, and then
    sends result to an asynchronous plotter for display
    """

    def queue_data_for_processing(self, raw_data):
        """
        Queues data for processing by the internal thread.
        """
        self.queue.put(raw_data)

    def _worker_loop(self):
        """
        The internal thread loop. Runs until the exit signal is set.
        Processes the supplied raw data into something ready
        for display.
        """
        while True:
            try:
                # check for any data waiting in the queue
                raw_data = self.queue.get(True, 1)
                # process the raw data, and send for display
                # in this trivial example, change circle radius -> area
                x, y = raw_data
                y = y**2 * math.pi
                self.ap.queue_result_for_output(x, y)
                self.queue.task_done()
            except Queue.Empty:
                pass
            finally:
                if self.esig.is_set():
                    return

    def hang_up(self):
        self.esig.set()  # set the exit signal...
        self.loop.join()  # ... and wait for thread to exit

    def __init__(self, ident, ap):
        self.ident = ident
        self.ap = ap
        self.esig = threading.Event()
        self.queue = Queue.Queue()
        self.loop = threading.Thread(target=self._worker_loop)
        self.loop.start()


if __name__ == "__main__":
    ap = AsynchronousPlotter()
    num_workers = 5  # use this many workers

    # create some workers. Give each worker some ID and tell it
    # where it can find the output plotter
    workers = []
    for worker_number in range(num_workers):
        workers.append(AsynchronousWorker(worker_number, ap))

    # supply some data to the workers
    for worker_number in range(num_workers):
        circle_number = worker_number
        circle_radius = worker_number * 4
        workers[worker_number].queue_data_for_processing([circle_number, circle_radius])

    # wait for workers to finish then tell the plotter to plot the results
    # in a longer-running example we would update the plot every few seconds
    time.sleep(2)
    ap.output_queued_results()

    # Wait for user to hit return, and clean up workers
    raw_input("Hit Return...")
    for worker in workers:
        worker.hang_up()
I kind of improved the code: I can send a value to it when it is being created, so that is good. But with multiprocessing I can't really figure out how to make the plot show. When I call the plot without multiprocessing it works, so it might be something simple that I can't see. I'm also trying to study the code you left a link to, but to me it's not very clear. In addition, I'm trying to save the processes to a list so that later I can try to send data directly to a process while it is running (I think a pipe is what I use for this, but I'm not sure).
import matplotlib.pyplot as plt
from multiprocessing import Process

plt.ion()


class DynamicUpdate():
    # Suppose we know the x range
    min_x = 0
    max_x = 10

    def __init__(self, x):
        self.number = x

    def on_launch(self):
        # Set up plot
        self.figure, self.ax = plt.subplots()
        self.lines, = self.ax.plot([], [], 'o')
        # Autoscale on unknown axis and known lims on the other
        self.ax.set_autoscaley_on(True)
        self.ax.set_xlim(self.min_x, self.max_x)
        # Other stuff
        self.ax.grid()
        ...

    def on_running(self, xdata, ydata):
        # Update data (with the new _and_ the old points)
        self.lines.set_xdata(xdata)
        self.lines.set_ydata(ydata)
        # Need both of these in order to rescale
        self.ax.relim()
        self.ax.autoscale_view()
        # We need to draw *and* flush
        self.figure.canvas.draw()
        self.figure.canvas.flush_events()

    # Example
    def __call__(self):
        print(self.number)
        import numpy as np
        import time
        self.on_launch()
        xdata = []
        ydata = []
        for x in np.arange(0, 10, 0.5):
            xdata.append(x)
            ydata.append(np.exp(-x**2) + 10*np.exp(-(x-7)**2))
            self.on_running(xdata, ydata)
            time.sleep(1)
        return xdata, ydata


_processes_ = []
for i in range(0, 2):
    _processes_.append(Process(target=DynamicUpdate(i)))
    p = Process(target=_processes_[i])
    p.start()
    # tried adding p.join(), but it didn't change anything
    p.join()
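As an aside on sending data to a process while it is running: one common pattern is to give each process its own multiprocessing.Queue (a Pipe works similarly) and have the process read from it inside its loop. A rough sketch of that idea, reusing the DynamicUpdate class above; this is my own illustration of the queue pattern, not a tested fix for the plotting issue (each plot window still has to be created inside the child process that uses it):

import time
from multiprocessing import Process, Queue


def plotter_loop(number, data_queue):
    # Runs in the child process: create the plot here and poll the queue for points.
    plot = DynamicUpdate(number)
    plot.on_launch()
    xdata, ydata = [], []
    while True:
        item = data_queue.get()  # blocks until the parent sends something
        if item is None:  # None is used as a shutdown signal
            break
        x, y = item
        xdata.append(x)
        ydata.append(y)
        plot.on_running(xdata, ydata)


if __name__ == '__main__':
    queues, processes = [], []
    for i in range(2):
        q = Queue()
        p = Process(target=plotter_loop, args=(i, q))
        p.start()
        queues.append(q)
        processes.append(p)

    # "SendDataToThread(data, number)" then becomes queues[number].put(data)
    for step in range(10):
        for i, q in enumerate(queues):
            q.put((step, step * (i + 1)))
        time.sleep(0.5)

    for q in queues:
        q.put(None)  # tell each process to finish
    for p in processes:
        p.join()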