I am currently trying to create a win32 service using pywin32. My main point of reference has been this tutorial:
http://code.activestate.com/recipes/551780/
What I don't understand is the initialization process, since the Daemon is never initialized directly by Daemon(); instead, from my understanding, it's initialized by the following:
mydaemon = Daemon
__svc_regClass__(mydaemon, "foo", "foo display", "foo description")
__svc_install__(mydaemon)
Where svc_install handles the initialization by calling Daemon.init() and passing some arguments to it.
But how can I initialize the daemon object without initializing the service? I want to do a few things before I init the service. Does anyone have any ideas?
# Minimal Windows-service skeleton built on pywin32 (Python 2 syntax).
# A subclass overrides run() with the service body; SvcDoRun/SvcStop are the
# entry points the Windows service control manager invokes.
# NOTE(review): this paste has lost its indentation -- the defs below belong
# inside the class body.
class Daemon(win32serviceutil.ServiceFramework):
def __init__(self, args):
# `args` is supplied by the service manager, not by user code.
win32serviceutil.ServiceFramework.__init__(self, args)
# Win32 event used to signal the service to stop.
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
# Called by the SCM when the service starts: delegate to the subclass hook.
def SvcDoRun(self):
self.run()
# Called by the SCM on stop: report the pending state, then signal the event.
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
# Subclass hook: one-time startup work.
def start(self):
pass
# Convenience wrapper so user code can stop the service programmatically.
def stop(self):
self.SvcStop()
# Subclass hook: the long-running service body.
def run(self):
pass
# Register the service with the Windows service control manager using the
# cls._svc_* attributes set beforehand by __svc_regClass__.
# NOTE(review): indentation lost in the paste; Python 2 print/except syntax.
def __svc_install__(cls):
# Swallow console Ctrl events (Ctrl+C etc.) so they don't abort installation.
win32api.SetConsoleCtrlHandler(lambda x: True, True)
try:
win32serviceutil.InstallService(
cls._svc_reg_class_,
cls._svc_name_,
cls._svc_display_name_,
# SERVICE_AUTO_START: start automatically at boot.
startType = win32service.SERVICE_AUTO_START
)
print "Installed"
except Exception, err:
# Best-effort: report the failure instead of raising.
print str(err)
# Attach the service's registry metadata (name, display name, description) to
# the class, and compute the "module_path.ClassName" string pywin32 stores in
# the registry so the service host can re-import the class later.
def __svc_regClass__(cls, name, display_name, description):
# Bind the naming values onto the service class.
cls._svc_name_ = name
cls._svc_display_name_ = display_name
cls._svc_description_ = description
try:
# Normal case: the class lives in an importable module file.
module_path = sys.modules[cls.__module__].__file__
except AttributeError:
# Frozen executables (py2exe etc.) have no __file__; fall back to the exe.
from sys import executable
module_path = executable
# Strip the extension so .py/.pyc/.exe all map to the same base path.
module_file = os.path.splitext(os.path.abspath(module_path))[0]
cls._svc_reg_class_ = '%s.%s' % (module_file, cls.__name__)
I just created a simple "how to" where the program is in one module and the service is in another; it uses py2exe to create the win32 service, which I believe is the best you can do for users who don't want to mess with the Python interpreter or other dependencies.
You can check my tutorial here: Create win32 services using Python and py2exe
I've never used these APIs, but digging through the code, it looks like the class passed in is used to register the name of the class in the registry, so you can't do any initialization of your own. But there's a method called GetServiceCustomOption that may help:
http://mail.python.org/pipermail/python-win32/2006-April/004518.html
Related
I'm new at NAO programming and I'm having some trouble regarding the ALAudioDevice API.
My problem is the following one: I wrote a python module that should record raw data from the front microphone.
The documentation of the ALAudioDevice API says that the method "subscribe(...)" calls the function "process" automatically
and regularly with raw data from microphones as inputs. I wrote code to execute this process (see below), and it runs without raising
the error flag. However, the subscribe is bypassing the function "process" and the module doesn't get any audio at all.
Has someone had the same problem?
import qi
# NAOqi module meant to receive raw microphone audio from ALAudioDevice.
# NOTE(review): paste has lost its indentation; Python 2 except syntax.
class AudioModule(object):
def __init__(self):
super(AudioModule, self).__init__()
# Name under which this module is registered / subscribed.
self.moduleName = "AudioModule"
try :
self.ALAudioDevice = ALProxy("ALAudioDevice")
except Exception, e:
# NOTE(review): self.logger is never assigned in this class -- presumably
# provided by the Choregraphe box environment; verify before reuse.
self.logger.error("Error when creating proxy on ALAudioDevice:")
self.logger.error(e)
def begin_stream(self):
# (name, 16000, 3, 0): assumes 16 kHz and front-microphone channel
# selection per the ALAudioDevice docs -- TODO confirm the 3/0 arguments.
self.ALAudioDevice.setClientPreferences(self.moduleName, 16000, 3, 0)
self.ALAudioDevice.subscribe(self.moduleName)
def end_stream(self):
self.ALAudioDevice.unsubscribe(self.moduleName)
# Callback ALAudioDevice is expected to invoke with audio buffers.
# NOTE(review): per the answer below, when running in-process (Choregraphe
# box) the callback must be named "process", not "processRemote".
def processRemote( self, nbOfChannels, samplesByChannel, altimestamp, buffer ):
# Self-assignment: no effect; likely placeholder from the original post.
nbOfChannels = nbOfChannels
mylogger = qi.Logger("data")
mylogger.info("It works !" + str(nbOfChannels))
class MyClass(GeneratedClass):
    """Choregraphe box that registers an AudioModule as a qi service.

    Lifecycle: onLoad registers the service, onInput_onStart starts the audio
    stream, onInput_onStop tears everything down, onUnload unregisters.
    """

    def __init__(self):
        GeneratedClass.__init__(self, False)
        self.audio = AudioModule()

    def onLoad(self):
        # Keep the service id so onUnload can unregister exactly once.
        self.serviceId = self.session().registerService("AudioModule", self.audio)

    def onUnload(self):
        # -1 marks "already unregistered"; guard against double unload.
        if self.serviceId != -1:
            self.session().unregisterService(self.serviceId)
            self.serviceId = -1

    def onInput_onStart(self):
        self.audio.begin_stream()
        self.onInput_onStop()

    def onInput_onStop(self):
        self.audio.end_stream()
        # BUG FIX: the original read `self.onUnload` (a bare attribute access
        # with no parentheses), which never invoked the method, so the service
        # was never unregistered here. Call it.
        self.onUnload()
        self.onStopped()
It appears you are subscribing to the audio from a Choregraphe box. I'm not sure it is supposed to work.
But in this configuration the Python code is executed from within the same process as the ALAudioDevice service. So probably you should name your callback "process" instead of "processRemote".
Otherwise, you can still do this from a separate Python script.
I'm trying to figure out if subclassing QtConcurrent and writing a run method inside it will work:
# NOTE(review): intentionally non-working example from the question.
# QtConcurrent is a C++ namespace, not a class, so it cannot be subclassed;
# the usable base class for this pattern is QRunnable (see answer below).
class Task(QtCore.QtConcurrent):
def run(self, function):
function()
Or is it completely useless?
It's completely useless, because QtConcurrent is a namespace, not a class.
Also, neither PyQt nor PySide provide any of the functionality provided by QtConcurrent, because it's all template-based and therefore impossible to wrap.
PS: the PySide documentation you linked to is for the ReduceOption enum. Since it's doubtful whether that enum has any use outside the QtConcurrent namespace, it's probably a bug that PySide includes it.
The class you are looking for is QRunnable.
I am stuck on the same problem in PyQt5. I guess the only solution is to do this locally:
# Start self._connect on a worker QThread defined locally, so the GUI thread
# is not blocked while the database connection is established.
# NOTE(review): paste has lost its indentation.
def connect(self):
# Local class: a minimal QThread that runs an arbitrary callable.
class ConnectThread(QThread):
def __init__(self, func):
super().__init__()
self.func = func
def run(self):
self.func()
# Store the thread on self -- if the reference went out of scope the
# QThread object could be garbage-collected while still running.
self.connectThread = ConnectThread(self._connect)
self.connectThread.start()
# Worker-thread body: open a neo4j driver connection if none exists yet,
# reporting progress via the statusMessage/clearStatusMessage signals.
def _connect(self):
# Only connect once; self._driver stays None until a successful connect.
if self._driver is None:
uri = self.uriString()
# Require a complete set of credentials before attempting to connect.
if uri and self.user and self.password:
# -1 duration: presumably "show until replaced" -- TODO confirm.
self.statusMessage.emit("Connecting to the Graph Database....", -1, "color:blue;")
try:
self._driver = GraphDatabase.driver(uri, auth=(self.user, self.password))
self.statusMessage.emit("Connected!", 5000, "color:green;")
except Exception as e:
self.clearStatusMessage.emit()
# Show the error dialog; re-raise only in debug runs (stripped with -O).
Error(str(e)).exec_()
if __debug__:
raise e
And remember to set the thread to a member variable: self.thread = ... or else your thread reference will go out of scope, and most likely the thread object deleted.
You could also move your function-to-call into a local definition of it as Python allows both nested functions and classes within one another!
I have a file containing some data – data.txt (existing in proper localization). I would like that django app processes this file before starting app and reacts for every change (without restart). What is the best way to do it?
For startup you can write middleware that does what you want in init and afterwards raise django.core.exceptions.MiddlewareNotUsed from the init, so the django will not use it for any request processing. docs
And middleware init will be called at startup, not at the first request.
As for react to file changes you can use https://github.com/gorakhargosh/watchdog ( an example of usage can be found here).
So you can either start it somewhere in middleware too, or if its only db updates you can create a separate script(or django management command) that will be run via supervisor or something like this and will monitor this file and update the db.
An option could be pynotify that monitors the filesystem for changes, it works only on Linux though.
Otherwise look at the code of runserver command that seems to do the same thing (the code of the autoreload module is here).
To run a command before starting an app I suppose you can write some code in the settings module.
maybe you could put a object in the settings which will lookup to the file for every change. ...
ie :
Make a class that will load the file and reload it whenever the file is modified:
class ExtraConfigWatcher(object):
    """Read-only, dict-like view of a config file that transparently reloads
    the file from disk whenever it has been modified.

    Relies on two helpers defined elsewhere: has_been_modified(path, ts) and
    get_dict_with_file(path).
    """

    def __init__(self, file):
        self.file = file
        # Last parsed contents of the file.
        self.cached = dict()
        # Timestamp of the last reload; None forces the first load.
        self.last_date_modified = None

    def update_config(self):
        """
        update the config by reloading the file
        """
        if has_been_modified(self.file, self.last_date_modified):
            # Regenerate the config dict from the file.
            self.cached = get_dict_with_file(self.file)
            self.last_date_modified = time.time()

    def __getitem__(self, *args, **kwargs):
        # Refresh from disk (if needed) before every lookup.
        self.update_config()
        return self.cached.__getitem__(*args, **kwargs)

    def __setitem__(self, *args, **kwargs):
        # BUG FIX: the original did `raise NotImplemented(...)`. NotImplemented
        # is a singleton, not an exception type, and is not callable, so that
        # line actually raised TypeError. Raise NotImplementedError instead.
        raise NotImplementedError("you can't set config into this")
in settings.py: initialize this object
EXTRA_CONFIG = ExtraConfigWatcher("path/to/the/file.dat")
in myapps/views.py: import settings and use EXTRA_CONFIG
from django.conf import settings
# Example Django view reading the live-reloaded config via settings.
# NOTE(review): snippet is truncated -- the `if` body contains only a
# placeholder comment, so this fragment is not runnable as-is.
def dosomthing(request):
if settings.EXTRA_CONFIG["the_data_from_the_file"] == "foo":
# bouhh -- placeholder for the real handling
A while ago I was trying to find a mechanism to "hot-swap" Python modules. While that is not exactly what you need, maybe you can use the implementation I proposed, and monitor your configuration file for modifications and act accordingly.
The code I proposed is the following (I did not use inotify because I am working in an NFS file system):
import imp
import time
import hashlib
import threading
import logging
logger = logging.getLogger("")
# Background poller: every `frequency` seconds, hash the engine's source file
# and notify the engine when the hash (fingerprint) changes.
# NOTE(review): paste has lost its indentation.
class MonitorThread(threading.Thread):
def __init__(self, engine, frequency=1):
super(MonitorThread, self).__init__()
# The Engine being watched; must expose .source, .fingerprint, .notify().
self.engine = engine
# Poll interval in seconds.
self.frequency = frequency
# daemonize the thread so that it ends with the master program
self.daemon = True
def run(self):
# Poll forever; daemon flag above makes this safe at interpreter exit.
while True:
with open(self.engine.source, "rb") as fp:
fingerprint = hashlib.sha1(fp.read()).hexdigest()
if not fingerprint == self.engine.fingerprint:
self.engine.notify(fingerprint)
time.sleep(self.frequency)
# Hot-swappable module wrapper: loads a Python source file, fingerprints it,
# and reloads it when MonitorThread reports a change. Attribute access is
# proxied to the currently loaded module via __getattr__.
# NOTE(review): uses `imp`, which is deprecated/removed in modern Python
# (importlib is the replacement); indentation lost in the paste.
class Engine(object):
def __init__(self, source):
# store the path to the engine source
self.source = source
# load the module for the first time and create a fingerprint
# for the file
self.mod = imp.load_source("source", self.source)
with open(self.source, "rb") as fp:
self.fingerprint = hashlib.sha1(fp.read()).hexdigest()
# turn on monitoring thread
monitor = MonitorThread(self)
monitor.start()
# Called from the monitor thread when the file's hash changes: remember the
# new fingerprint and reload the module in place.
def notify(self, fingerprint):
logger.info("received notification of fingerprint change ({0})".\
format(fingerprint))
self.fingerprint = fingerprint
self.mod = imp.load_source("source", self.source)
# Delegate unknown attributes to the loaded module, so engine.f1() works.
def __getattr__(self, attr):
return getattr(self.mod, attr)
# Demo driver: hot-swap engine.py while this loop keeps calling into it.
def main():
logging.basicConfig(level=logging.INFO,
filename="hotswap.log")
# engine.py must define f1() and f2() for the loop below.
engine = Engine("engine.py")
# this silly loop is a sample of how the program can be running in
# one thread and the monitoring is performed in another.
while True:
engine.f1()
engine.f2()
time.sleep(1)
if __name__ == "__main__":
main()
I'm trying to dynamically add signals to a D-Bus service using dbus-python. It provides a decorator for this that works fine if the signal names are known at module load time; however, I don't know what name to export to D-Bus until runtime.
To illustrate the problem, what I'd like to do is the moral equivalent of:
import dbus
import dbus.service
import gobject
from dbus.mainloop.glib import DBusGMainLoop
# A D-Bus object whose object path and (intended) signal interface are derived
# from a runtime name -- illustrating why the decorator approach fails.
# NOTE(review): paste has lost its indentation; Python 2 print syntax.
class Event(dbus.service.Object):
def __init__(self, name):
self.name = name
self.busName = dbus.service.BusName('com.acme.EventManager',
bus=dbus.SessionBus())
# Export this object at /com/acme/EventManager/<name>.
dbus.service.Object.__init__(self,
self.busName,
'/com/acme/EventManager/' +
self.name)
# The interface name we would like the signal exported under.
self.signame = 'com.acme.EventManager.' + self.name
# THIS DOES NOT WORK: this decorator is parsed before the Event
# class, and 'self' wouldn't exist here, anyway...
#dbus.service.signal(dbus_interface=self.signame, signature='v')
def emit(self, data):
print "In %s event, got: %s " % (self.name, data)
# Script entry: set up the GLib main loop and export two Event objects.
if __name__ == "__main__":
DBusGMainLoop(set_as_default=True)
bus = dbus.SessionBus()
loop = gobject.MainLoop()
connect = Event('Connect')
disconnect = Event('Disconnect')
# Blocks forever servicing D-Bus requests.
loop.run()
Not surprisingly, this generates:
#dbus.service.signal(dbus_interface=self.signame, signature='v')
NameError: name 'self' is not defined
I thought I could just dispense with the syntactic sugar provided by the # decoration operator and patch Event manually after the class has been defined, something like this:
import dbus
import dbus.service
import gobject
from dbus.mainloop.glib import DBusGMainLoop
# Second attempt: same Event class without the decorator, to be patched after
# instantiation (see the __main__ block below).
# NOTE(review): paste has lost its indentation; Python 2 print syntax.
class Event(dbus.service.Object):
def __init__(self, name):
self.name = name
self.busName = dbus.service.BusName('com.acme.EventManager',
bus=dbus.SessionBus())
dbus.service.Object.__init__(self,
self.busName,
'/com/acme/EventManager/' +
self.name)
# Interface name to apply the signal decorator with, per instance.
self.signame = 'com.acme.EventManager.' + self.name
def emit(self, data):
print "In %s event, got: %s " % (self.name, data)
if __name__ == "__main__":
DBusGMainLoop(set_as_default=True)
bus = dbus.SessionBus()
loop = gobject.MainLoop()
e1 = Event('Connect')
# Manually apply the signal decorator to the *bound* method after the
# instance exists. As the question notes, dbus-python expects a plain
# function at class-definition time, so this fails to export the signal.
e1.emit = dbus.service.signal(dbus_interface=e1.signame,
signature='v')(e1.emit)
loop.run()
This runs without errors, but it fails to export the signals to D-Bus. When I run D-Feet, I see the object path /com/acme/EventManager/Connect but it has no interface methods aside from Introspect().
To see if I could learn something about what was going on, I examined the value of the function passed to the dbus.service.signal decorator in a debugger. For this typical use case:
# Typical decorator use case being examined in the debugger.
# NOTE(review): the first line was presumably `@dbus.service.signal(...)` in
# the original post -- the site rendering turned the `@` into `#`.
#dbus.service.signal(dbus_interface='com.acme.foo', signature='v')
def emit(self, data):
pass
the function passed in to the decorator (in the variable func) looks like this:
>>> func
>>> <function emit at 0x99fbed4>
But when I manually invoke the decorator function (as in the the e1.emit = assignment in the second example above), I see:
>>> func
>>> <bound method Event.emit of <__main__.Event at /com/acme/EventManager/Connect at 0x9b3348c>>
So... it seems that, for the normal use case, dbus.service.signal expects to receive a free function---not an unbound function, but a function that, for all intents and purposes, looks like it was defined using:
def emit():
    """No-op stand-in showing the bare-function shape the decorator expects."""
    return None
This behavior is utterly mystifying to me. I've read lots of tutorials on decorators, and thought I understood them pretty well, but I have been stumped on this for hours. If this decorator expects to be called with 'raw' function, how can I convert an object method such that I can invoke it manually?
From this question, I see that types.MethodType() can be used to convert a free function to a bound method. But it seems I need to do the opposite(?)
I think one way to do what you want is by using a factory function — however the following is untested because I don't have the D-Bus module installed.
The root of the problem is you're attempting to use a decorator at class definition time that requires data which isn't being provided until instances of that class are created. One workaround for that is to define the class inside a function and use closures so the data is available when needed. Note that the factory function returns instances of the class created, not the class itself, although it could if desired.
import dbus
import dbus.service
import gobject
from dbus.mainloop.glib import DBusGMainLoop
# Factory: build a fresh Event class per event name so the signal decorator
# sees the name via closure at class-definition time, then return an instance.
# NOTE(review): paste has lost its indentation, and the decorator line below
# was presumably `@dbus.service.signal(...)` -- the leading `@` was mangled
# into `#`, leaving the `signature='v')` line orphaned.
def event_factory(event_name):
class Event(dbus.service.Object):
def __init__(self):
self.busName = dbus.service.BusName('com.acme.EventManager',
bus=dbus.SessionBus())
dbus.service.Object.__init__(self,
self.busName,
'/com/acme/EventManager/'+event_name)
#dbus.service.signal(dbus_interface='com.acme.EventManager.'+event_name,
signature='v')
def emit(self, data):
print "In %s event, got: %s " % (event_name, data)
return Event() # return an instance of the class
# Entry point: create the two event objects via the factory and serve D-Bus.
if __name__ == "__main__":
DBusGMainLoop(set_as_default=True)
bus = dbus.SessionBus()
loop = gobject.MainLoop()
connect = event_factory('Connect')
disconnect = event_factory('Disconnect')
loop.run()
Basically I'm building an application that has a couple of numbered options for you to pick from.
It's named main.py; I wrote standalone modules for each possible option so I can run the modules separately. One module I wrote contains a threaded class. A problem I'm having when I run `python mod_keepOnline.py` is that it does not pass control back to the terminal, AND when I run the module through main.py, main.py stops listening for a new choice to pick. I know it's because of the threads. I was wondering how I can let the threads manage themselves after they have been spawned, so control returns from mod_keepOnline.py to the terminal or main script.
I also want to be able to kill the released threads again.
something like mod_keepOnline.py -killAll
Uhm heres my code :
###########################################
################## SynBitz.net ############
import threading
import objects
import time
import mechanize
import os
import gb
##########################################
# Thread that keeps one account "online": repeatedly log in, wait, log out,
# until onlineTime seconds have elapsed since start.
# NOTE(review): paste has lost its indentation; Python 2 print syntax.
class Class_putOnline (threading.Thread):
def __init__ (self,person,onlineTime):
threading.Thread.__init__ (self)
# Wall-clock start, used to enforce the total online duration.
self.startTime = time.time()
self.alive = True
self.person = person
# Total seconds to keep this account cycling online.
self.onlineTime = onlineTime
self.firstMessage=True
def run(self):
while(self.alive):
# Announce only on the first iteration.
if(self.firstMessage):
print self.person.getInfo() + " SPAWNED ONLINE"
self.firstMessage=False
self.person.login()
# Stay logged in for 5 minutes per cycle.
time.sleep(300)
self.person.logout()
if((time.time()-self.startTime) > self.onlineTime):
print self.person.getInfo() + " SPAWNED OFFLINE "
self.alive = False
# NOTE(review): _Thread__stop() is a private CPython 2 internal and
# does not exist in Python 3; setting self.alive False already ends
# the loop, so this call is redundant as well as fragile.
self._Thread__stop()
#########################################
# Read account records from gb.accFile, build a mechanize browser + spider Bot
# per account, then spawn a Class_putOnline thread for each bot.
# NOTE(review): paste has lost its indentation; file handle from open() is
# never closed, and gb.accountInfo is module-global, so it is overwritten on
# every loop iteration.
def main():
for line in open(gb.accFile,"r"):
gb.accountList.append(line.rstrip('\n'))
for account in gb.accountList:
# Records are pipe-delimited: presumably user|pass|distro|... -- TODO
# confirm the field order against the Bot constructor.
gb.accountInfo = account.split('|',4)
browser = mechanize.Browser()
browser.set_handle_robots(False)
browser.set_handle_redirect(True)
browser.set_handle_referer(True)
browser.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
gb.spiderList.append(objects.spider.Bot(gb.accountInfo[0],gb.accountInfo[2],gb.accountInfo[1],gb.accountInfo[3],browser))
if gb.accountInfo[2] not in gb.distros:
gb.distros.append(gb.accountInfo[2])
onlineAccounts = []
for index, acc in enumerate(gb.spiderList):
# 115200 s = 32 h total online time per account.
onlineAccounts.append(Class_putOnline(acc,115200)) # 600*6*8*4= 28800 = 8 uur 3600 test seconds = 1 h (1200 seconds for test time of 20 minutes... )
# Small stagger between thread launches.
time.sleep(0.1)
onlineAccounts[index].start()
if __name__ == "__main__":
main()
When I open a ssh session to my server and run a python script, even when I run it in background, it dies after I close my session. How do i keep my scripts running when I'm not connected?
When I open a ssh session to my server and run a python script, even when I run it in background, it dies after I close my session. How do i keep my scripts running when I'm not connected?
Run it as a cronjob, and manually start the cronjob if you need to run script on demand.
Ok I'm pretty new to python
Me too.
EDIT:
Quick tip, use """ for long comments.
Example:
"""Description:
This does this and that and extends this and that. Use it like this and that.
"""
as I understand:
When you run a process then the input and output of the terminal is redirected to the process input and output.
If you start threads this will not change anything with this. The process has the terminal in and output as long both exist. What you can do is send this program to the background (with control-z).
If you run a program, it has its own namespace. You can import a module and change attributes of it but this will never change the module in another program.
If you want to have two programs — one running in the background all the time (e.g. with jobs, as proposed by Tyson) and one run from the command line — you need to communicate between those two processes.
Maybe there are other ways to circumvent the borders between processes, but I do not know them.
Therefore I wrote this module where I can store values in. Everytime a value is stored directly, the state of the module is saved to disk.
'''
This is a module with persistent attributes
the attributes of this module are spread all over all instances of this module
To set attributes:
import runningConfiguration
runningConfiguration.x = y
to get attributes:
runningConfiguration.x
'''
import os
import types
import traceback
# Path of the pickle file backing this module's persistent attributes.
# NOTE(review): `fileName = fileName` is assigned twice -- harmless but
# presumably a typo in the original.
fn = fileName = fileName = os.path.splitext(__file__)[0] + '.conf'
# Module subclass that replaces THIS module in sys.modules, so that attribute
# reads/writes on the module transparently load from / save to a pickle file.
# NOTE(review): paste has lost its indentation; uses the Python 2 `file()`
# builtin and bare `except:` clauses -- deliberate best-effort behavior.
class RunningConfiguration(types.ModuleType):
fileName = fn
def __init__(self, *args, **kw):
types.ModuleType.__init__(self, *args, **kw)
import sys
# Shadow the real module object with this instance.
sys.modules[__name__] = self
self.load()
# Persist every attribute to disk.
def save(self):
import pickle
pickle.dump(self.__dict__, file(self.fileName, 'wb'))
# Refresh attributes from disk; errors are printed, not raised, so a
# missing/corrupt file never breaks attribute access.
def load(self):
import pickle
try:
dict = pickle.load(file(self.fileName, 'rb'))
except EOFError:
pass
except:
import traceback
traceback.print_exc()
else:
self.__dict__.update(dict)
# Setting an attribute saves immediately; on save failure the previous
# value is restored (the fresh list `l` is a unique "missing" sentinel).
def __setattr__(self, name, value):
## print 'set', name, value,
l = []
v1 = self.__dict__.get(name, l)
self.__dict__[name] = value
try:
self.save()
## print 'ok'
except:
if v1 is not l:
self.__dict__[name] = v1
raise
# Reads reload from disk first, except for the bootstrap names that must
# resolve without recursion.
def __getattribute__(self, name):
import types
if name in ('__dict__', '__class__','save','load','__setattr__', '__delattr__', 'fileName'):
return types.ModuleType.__getattribute__(self, name)
## print 'get', name
self.load()
l = []
ret = self.__dict__.get(name, l)
if ret is l:
# Fall back to class attributes, then module globals.
if hasattr(self.__class__, name):
return getattr(self.__class__, name)
if name in globals():
return globals()[name]
raise AttributeError('%s object has no attribute %r' % (self.__class__.__name__, name))
return ret
# Deleting an attribute also persists immediately.
def __delattr__(self, name):
del self.__dict__[name]
self.save()
# Import-time side effect: replace this module with a persistent instance.
RunningConfiguration(__name__)
I saved it to runningConfiguration.py.
You can use it like this:
# program1 -- example client of the persistent-attribute module.
import runningConfiguration
# Initialize the attribute only on first run (it may already be on disk).
if not hasattr(runningConfiguration, 'programs'):
runningConfiguration.programs = [] ## variable programs is set
# += rebinds the attribute, which triggers __setattr__ and saves the module.
runningConfiguration.programs+= ['program1'] ## list is changed and = is used -> module is saved
This is an insecure module and not everything can be saved to it, but many things.
Also when two modules save at the same time the first written values may get lost.
Try it out: import it from two different programs and see how it behaves.