I have the following situation (all three are methods of a Python class) where I have to send a message to a remote device, with two callbacks that report the state of the remote device.
# callback when an app has completed downloading on a remote device
def handleAppDownloadComplete():
    pass  # something

# callback when an app has restarted on a remote device
def handleAppRestart():
    pass  # app restart callback

def sendMessage(message):
    # Do things like validation etc.
    sendMessageToRemoteDevice(message)
My situation is:
1) Call sendMessage() when the handleAppDownloadComplete callback is invoked.
2) If handleAppRestart() is called at any point during sendMessage(), stop execution of sendMessage(), wait for handleAppDownloadComplete() to be called back, and call sendMessage() again.
I have tried to use threading.Event, but that approach seems very circular to me. And to add, both callbacks are provided by third-party libraries and I can't change them. Is there a better way/design to handle this situation?
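For context, my threading.Event attempt looked roughly like this (simplified; the names are placeholders and sendMessageToRemoteDevice stands in for the real transport call):

import threading

restart_event = threading.Event()

def handleAppDownloadComplete():
    restart_event.clear()   # fresh download: sending is allowed again
    sendMessage("payload")

def handleAppRestart():
    restart_event.set()     # tell sendMessage to abort

def sendMessage(message):
    # check the flag between steps and bail out if a restart happened
    if restart_event.is_set():
        return
    # Do things like validation etc.
    if restart_event.is_set():
        return
    sendMessageToRemoteDevice(message)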
https://docs.python.org/3/library/asyncio-task.html#future (look at the example)
You could model the call to sendMessage() as a task that handleAppRestart() can cancel. So you'd have a class attribute task bound to the current task.
import asyncio

class foo:
    task = None
    loop = asyncio.get_event_loop()

    def handleAppDownloadComplete(self):
        self.task = asyncio.ensure_future(self.sendMessage(bar))
        self.loop.run_until_complete(self.task)

    # callback when an app has restarted on a remote device
    def handleAppRestart(self):
        self.task.cancel()

    @asyncio.coroutine
    def sendMessage(self, message):
        # Do things like validation etc.
        sendMessageToRemoteDevice(message)
Anyway, the answer is: use asynchronous abstractions to do what you want.
EDIT: Wait, you can't change handleAppDownloadComplete(), handleAppRestart() or sendMessage(message)?
I'm working on an asynchronous communication script that will act as a middleman between a React Native app and another agent. To do this I used Python with D-Bus to implement the communication between the two.
To implement this we created two processes, one for the BLE and one for the communication with the agent. In cases where the agent replies immediately (with a non-blocking call), the communication always works as intended. In the case where we attach to a signal to continuously monitor an update status, the following error occurs most of the time, at random points during the process:
dbus.exceptions.DBusException: org.freedesktop.DBus.Error.NoReply: Did not receive a reply. Possible causes include: the remote application did not send a reply, the message bus security policy blocked the reply, the reply timeout expired, or the network connection was broken.
I have tested both the BLE process and the agent process separately and they work as intended.
I currently suspect that it could be related to messages "crashing" on the system bus or to some race condition, but we are unsure how to validate that.
Any advice on what could be causing this issue or how I could avoid it?
For completeness I've attached a simplified version of the class that handles the communication with the agent.
import multiprocessing
from enum import Enum
import dbus
import dbus.mainloop.glib
from dbus.proxies import ProxyObject
from gi.repository import GLib
from omegaconf import DictConfig
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
class ClientUpdateStatus(Enum):
SUCCESS = 0
PENDING = 1
IN_PROGRESS = 2
FAILED = 3
class DBUSManager:
GLIB_LOOP = GLib.MainLoop()
COMMUNICATION_QUEUE = multiprocessing.Queue()
def __init__(self, config: DictConfig) -> None:
dbus_system_bus = dbus.SystemBus()
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
self._config = config
self._dbus_object = dbus_system_bus.get_object(self._config['dbus_interface'],
self._config['dbus_object_path'], introspect=False)
def get_version(self) -> str:
version = self._dbus_object.GetVersion("clientSimulator", dbus_interface=self._config['dbus_interface'])
return version
def check_for_update(self) -> str:
update_version = self._dbus_object.CheckForUpdate("clientSimulator",
dbus_interface=self._config['dbus_interface'])
return update_version
def run_update(self) -> ClientUpdateStatus:
raw_status = self._dbus_object.ExecuteUpdate(dbus_interface=self._config['dbus_interface'])
update_status = ClientUpdateStatus(raw_status)
# Launch listening process
signal_update_proc = multiprocessing.Process(target=DBUSManager.start_listener_process,
args=(self._dbus_object, self._config['dbus_interface'],))
signal_update_proc.start()
while True:
raw_status = DBUSManager.COMMUNICATION_QUEUE.get()
update_status = ClientUpdateStatus(raw_status)
if ClientUpdateStatus.SUCCESS == update_status:
break
signal_update_proc.join()
return update_status
    @staticmethod
def start_listener_process(dbus_object: ProxyObject, dbus_interface: str) -> None:
dbus_object.connect_to_signal("UpdateStatusChanged", DBUSManager.status_change_handler,
dbus_interface=dbus_interface)
# Launch loop to acquire signals
DBUSManager.GLIB_LOOP.run() # This listening loop exits on GLIB_LOOP.quit()
    @staticmethod
def status_change_handler(new_status: int) -> None:
DBUSManager.COMMUNICATION_QUEUE.put(new_status)
if ClientUpdateStatus.SUCCESS == ClientUpdateStatus(new_status):
DBUSManager.GLIB_LOOP.quit()
At this stage I would recommend running dbus-monitor to see whether the agent and the BLE process are reacting to requests properly.
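For example, something along these lines, watching the system bus for signals on your interface (the interface name here is a placeholder; substitute your own):

dbus-monitor --system "type='signal',interface='com.example.YourInterface'"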
Maybe to help others in the future: I haven't found the solution, but at least a way to work around the problem. We have encountered it at various places in our project, and what generally helped, don't ask me why, was to always re-instantiate all needed D-Bus objects. So instead of having a single class that holds a system bus variable self._system_bus = dbus.SystemBus() or an interface variable self._manager_interface = dbus.Interface(proxy_object, "org.freedesktop.DBus.ObjectManager"), we would always re-instantiate them.
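As a rough sketch of what I mean, reusing the call names from the class above (simplified, untested):

import dbus

def execute_update(config) -> int:
    # build a fresh bus and proxy for every call instead of caching them
    bus = dbus.SystemBus()
    proxy = bus.get_object(config['dbus_interface'],
                           config['dbus_object_path'], introspect=False)
    return proxy.ExecuteUpdate(dbus_interface=config['dbus_interface'])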
If somebody knows what the problem is I'm happy to hear it.
I am trying to connect with the IB API to download some historical data. I have noticed that my client connects to the API, but then disconnects automatically after a very short period (~a few seconds).
Here's the log in the server:
socket connection for client{10} has closed.
Connection terminated.
Here's my main code for starting the app:
from threading import Thread

class TestApp(TestWrapper, TestClient):
def __init__(self):
TestWrapper.__init__(self)
TestClient.__init__(self, wrapper=self)
self.connect(config.ib_hostname, config.ib_port, config.ib_session_id)
self.session_id = int(config.ib_session_id)
self.thread = Thread(target = self.run)
self.thread.start()
setattr(self, "_thread", self.thread)
self.init_error()
def reset_connection(self):
pass
def check_contract(self, name, exchange_name, security_type, currency):
self.reset_connection()
ibcontract = IBcontract()
ibcontract.secType = security_type
ibcontract.symbol = name
ibcontract.exchange = exchange_name
ibcontract.currency = currency
return self.resolve_ib_contract(ibcontract)
def resolve_contract(self, security):
self.reset_connection()
ibcontract = IBcontract()
ibcontract.secType = security.security_type()
ibcontract.symbol=security.name()
ibcontract.exchange=security.exchange()
ibcontract.currency = security.currency()
return self.resolve_ib_contract(ibcontract)
def get_historical_data(self, security, duration, bar_size, what_to_show):
self.reset_connection()
resolved_ibcontract=self.resolve_contract(security)
        data = self.get_IB_historical_data(resolved_ibcontract.contract, duration, bar_size, what_to_show)
return data
def create_app():
test_app = TestApp()
return test_app
Any suggestions on what could be the problem? I can show more error messages from the debug if needed.
If you can connect without issue only by changing the client ID, that typically indicates that the previous connection was not properly closed and TWS thinks it's still open. To disconnect an API client you should call the EClient.disconnect function explicitly, overridden in your example as:
test_app.disconnect()
Though it's not necessary to disconnect/reconnect after every task; you can just leave the connection open for extended periods.
You may sometimes encounter problems if an API function, such as reqHistoricalData, is called immediately after connection. It's best to have a small pause after initiating a connection and to wait for a callback such as nextValidId to ensure the connection is complete before proceeding.
http://interactivebrokers.github.io/tws-api/connection.html#connect
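A minimal sketch of that pattern, assuming an ibapi-style app like TestApp above (the event attribute and helper name are made up for illustration):

import threading

class TestApp(TestWrapper, TestClient):
    def __init__(self):
        # ... connect as in the question ...
        self._connected = threading.Event()

    def nextValidId(self, orderId: int):
        # TWS invokes this callback once the connection is fully established
        self._connected.set()

    def wait_until_connected(self, timeout: float = 10.0) -> bool:
        # call this after connecting and before reqHistoricalData etc.
        return self._connected.wait(timeout)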
I'm not sure what the function init_error() is intended for in your example since it would always be called when a TestApp object is created (whether or not there is an error).
Installing the latest version of TWS API (v 9.76) solved the problem.
https://interactivebrokers.github.io/#
Looking at the Celery docs, I can see that the task monitor is launched in a script (see below). In a Django implementation (as is my understanding), this won't be the case, as I'll have to launch the task monitor in a thread.
Currently I'm launching the monitor the first time I run a job, then checking its state each subsequent time I run a job (see further below). This seems like a bad way to do it.
My question is, globally: what is the correct way to instantiate the task monitor for Celery in a Django project? But a good answer would address:
Is threading the accepted way to do this?
Should I launch this in a subprocess?
Do I need to be worried about the volume going through the task monitor (and hence should I use threading)?
Is there a standard, widely accepted way to do this?
It seems I'm missing something really obvious.
# docs example - not implemented like this in my project
from celery import Celery
def my_monitor(app):
state = app.events.State()
def announce_failed_tasks(event):
state.event(event)
# task name is sent only with -received event, and state
# will keep track of this for us.
task = state.tasks.get(event['uuid'])
print('TASK FAILED: %s[%s] %s' % (
task.name, task.uuid, task.info(),))
with app.connection() as connection:
recv = app.events.Receiver(connection, handlers={
'task-failed': announce_failed_tasks,
})
recv.capture(limit=None, timeout=None, wakeup=True)
if __name__ == '__main__':
    app = Celery(broker='amqp://guest@localhost//')
# LAUNCHED HERE
my_monitor(app)
# my current implementation
from threading import Thread

from celery import Celery

# If the celery_monitor is not instantiated, set it up
app = Celery('scheduler',
broker=rabbit_url, # Rabbit-MQ
backend=redis_url, # Redis
include=tasks
)
celery_monitor = Thread(target=build_monitor, args=[app], name='monitor-global', daemon=True)
# import celery_monitor into another module
global celery_monitor
if not celery_monitor.is_alive():
try:
celery_monitor.start()
logger.debug('Celery Monitor - Thread Started (monitor-retry) ')
except RuntimeError as e: # occurs if thread is dead
# create new instance if thread is dead
        logger.debug('Celery Monitor - Error restarting thread (monitor-retry): {}'.format(e))
celery_monitor = Thread(target=build_monitor, args=[app], name='monitor-retry', daemon=True)
celery_monitor.start() # start thread
logger.debug('Celery Monitor - Thread Re-Started (monitor-retry) ')
else:
logger.debug('Celery Monitor - Thread is already alive. Dont do anything.')
I'm running the same code on both systems; the following works on Windows, but does not run correctly on Ubuntu (16.04).
import websocket
import json
class WhatEver(object):
def __init__(self):
self.ws = websocket.WebSocketApp(
'wss://beijing.51nebula.com/',
on_message=self.on_ws_message,
on_open=self.on_open
)
def rin_forever(self):
print("start run forever")
self.ws.run_forever()
    def on_ws_message(self, ws, message):
print (message)
self.ws.close()
def _send_msg(self, params):
call = {"id": 1, "method": "call",
"params": params}
self.ws.send(json.dumps(call))
def on_open(self, ws):
print("start open function")
self._send_msg([1, "login", ["",""]])
if __name__ == '__main__':
ws=WhatEver()
print("start")
ws.rin_forever()
print("close")
I've tried reinstalling all modules (including the same versions of Python and websocket between both Windows and Ubuntu); the output of this code is correct on the Windows system:
start
start run forever
start open function
{"id":1,"jsonrpc":"2.0","result":true}
close
But when it runs on Ubuntu, while it does print, some print statements are missing:
start
start run forever
close
When I debugged the code on Ubuntu, I found that the main thread stops in the self.ws.run_forever() call and never reaches the on_open function; then it exits.
You are using two different versions of the library, with the version on Windows being older than version 0.53. As of version 0.53, the websocket project differentiates callback behaviour between bound methods and regular functions.
You are passing in bound methods (self.on_open and self.on_ws_message), at which point the ws argument is not passed in. Those methods are apparently expected to have access to the websocket already via their instance, probably because the expected use-case is to create a subclass from the socket class.
This is unfortunately not documented by the project, and the change appears to have caused problems for other people as well.
So for version 0.53 and newer, drop the ws argument from your callbacks:
class WhatEver(object):
def __init__(self):
self.ws = websocket.WebSocketApp(
'wss://beijing.51nebula.com/',
on_message=self.on_ws_message,
on_open=self.on_open
)
# ...
def on_ws_message(self, message):
print(message)
self.ws.close()
# ...
def on_open(self):
print("start open function")
self._send_msg([1, "login", ["", ""]])
And you can discover issues like these by enabling logging; the websocket module logs exceptions it encounters in callbacks to the logging.getLogger('websocket') logger. A quick way to see these issues is to enable tracing:
websocket.enableTrace(True)
which adds a logging handler just to that logging object, turns on logging.DEBUG level reporting for that object and in addition enables full socket data echoing.
Or you can configure logging to output messages in general with the logging.basicConfig() function:
import logging
logging.basicConfig()
which lets you see logging.ERROR level messages and up.
Using the latter option, the uncorrected version of the code prints out:
start
start run forever
ERROR:websocket:error from callback <bound method WhatEver.on_open of <__main__.WhatEver object at 0x1119ec668>>: on_open() missing 1 required positional argument: 'ws'
close
You can verify the version of websocket-client you have installed by printing websocket.__version__:
>>> import websocket
>>> websocket.__version__
'0.54.0'
Suppose I have a QThread for running a plugin in my app, and the plugin connects to a server specified by the user. When the user changes the server settings, the plugin should connect to the new server, as expected. Would it be a good idea to simply terminate the current plugin worker thread and spin up a new one when the user updates the settings?
This is what I've got at the moment:
class MainWindow(QMainWindow):
def __init__(self):
# ...
self.hostname.editingFinished.connect(
lambda: self._setup_new_server() or
self._restart_plugin_work_thread()
)
self.port.editingFinished.connect(
lambda: self._setup_new_server() or
self._restart_plugin_work_thread()
)
def _create_plugin_worker_thread(self):
self.plugin_worker_thread = QtCore.QThread()
self.plugin_worker = PluginWorker()
self.plugin_worker.moveToThread(self.plugin_worker_thread)
self.plugin_worker_thread.start()
self.plugin_worker.run_plugin_signal.connect(self.plugin_worker.run_plugin)
self.plugin_worker.stop_plugin_signal.connect(self.plugin_worker.stop_run_plugin)
def _terminate_plugin_worker_thread(self):
self.plugin_worker_thread.terminate()
def _restart_plugin_work_thread(self):
# terminate the current worker thread
self._terminate_plugin_worker_thread()
# create a new worker thread
self._create_plugin_worker_thread()
class PluginWorker(QtCore.QObject):
run_plugin_signal = QtCore.Signal(str, int, str, str)
stop_plugin_signal = QtCore.Signal()
# ...
PluginWorker is the worker class which mostly relies on a QTimer that triggers the plugin's execution method every 2 seconds.
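For reference, the timer setup inside PluginWorker looks roughly like this (simplified; _execute_plugin is a placeholder for the plugin's execution method):

class PluginWorker(QtCore.QObject):
    run_plugin_signal = QtCore.Signal(str, int, str, str)
    stop_plugin_signal = QtCore.Signal()

    def run_plugin(self, *args):
        # the timer lives in the worker thread and fires every 2 seconds
        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self._execute_plugin)
        self._timer.start(2000)

    def stop_run_plugin(self):
        self._timer.stop()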
Any help will be appreciated. Thanks.
There are several solutions, though I haven't worked with Qt.
If it were my code, I would kill the thread and start a new one with the updated connection settings.
At thread construction, pass a threading.Event object to the thread and also hold a reference to it from the main thread. When the connection string is updated, set the event and create a new thread (passing a new event object). Within the thread function, return if the event has been set.
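A minimal sketch of that pattern with plain threading (names are placeholders; the 2-second wait stands in for your QTimer tick):

import threading

def plugin_worker(stop_event: threading.Event, settings: dict):
    while not stop_event.is_set():
        # ... connect to / poll the server described by `settings` ...
        stop_event.wait(2.0)  # returns early if the event is set

stop = threading.Event()
worker = threading.Thread(target=plugin_worker, args=(stop, {"host": "a", "port": 1}))
worker.start()

# when the user updates the settings: signal the old thread, start a fresh one
stop.set()
worker.join()
stop = threading.Event()
worker = threading.Thread(target=plugin_worker, args=(stop, {"host": "b", "port": 2}))
worker.start()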