Releasing modules with threaded classes and controlling them - Python

Basically I'm building an application that presents a couple of numbered options for you to pick from.
It's named main.py, and I wrote standalone modules for each option so I can also run them separately. One of these modules, mod_keepOnline.py, contains a threaded class. The problem I'm having is that when I run `python mod_keepOnline.py`, control is never passed back to the terminal, AND when I run the module through main.py, main.py stops listening for a new choice. I know it's because of the threads. I'm wondering how I can let the threads manage themselves after they have been spawned, so that control returns from mod_keepOnline.py to the terminal or the main script.
I also want to be able to kill the released threads again, with something like:
mod_keepOnline.py -killAll
Here's my code:
###########################################
################## SynBitz.net ############
import threading
import objects
import time
import mechanize
import os
import gb
##########################################

class Class_putOnline(threading.Thread):
    def __init__(self, person, onlineTime):
        threading.Thread.__init__(self)
        self.startTime = time.time()
        self.alive = True
        self.person = person
        self.onlineTime = onlineTime
        self.firstMessage = True

    def run(self):
        while self.alive:
            if self.firstMessage:
                print self.person.getInfo() + " SPAWNED ONLINE"
                self.firstMessage = False
            self.person.login()
            time.sleep(300)
            self.person.logout()
            if (time.time() - self.startTime) > self.onlineTime:
                print self.person.getInfo() + " SPAWNED OFFLINE "
                self.alive = False
                self._Thread__stop()  # private CPython API, not a clean way to stop a thread

#########################################

def main():
    for line in open(gb.accFile, "r"):
        gb.accountList.append(line.rstrip('\n'))

    for account in gb.accountList:
        gb.accountInfo = account.split('|', 4)
        browser = mechanize.Browser()
        browser.set_handle_robots(False)
        browser.set_handle_redirect(True)
        browser.set_handle_referer(True)
        browser.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
        gb.spiderList.append(objects.spider.Bot(gb.accountInfo[0], gb.accountInfo[2], gb.accountInfo[1], gb.accountInfo[3], browser))
        if gb.accountInfo[2] not in gb.distros:
            gb.distros.append(gb.accountInfo[2])

    onlineAccounts = []
    for index, acc in enumerate(gb.spiderList):
        onlineAccounts.append(Class_putOnline(acc, 115200))  # 600*6*8*4 = 28800 = 8 hours; 3600 test seconds = 1 h (1200 seconds for a test time of 20 minutes...)
        time.sleep(0.1)
        onlineAccounts[index].start()

if __name__ == "__main__":
    main()
When I open an SSH session to my server and run a Python script, even when I run it in the background, it dies after I close my session. How do I keep my scripts running when I'm not connected?

Run it as a cron job, and manually start the cron job if you need to run the script on demand.
OK, I'm pretty new to Python.
Me too.
EDIT:
Quick tip: use """ for long comments.
Example:
"""Description:
This does this and that and extends this and that. Use it like this and that.
"""

As I understand it:
When you run a process, the terminal's input and output are redirected to the process's input and output.
Starting threads does not change anything about this: the process owns the terminal's input and output as long as both exist. What you can do is send the program to the background (with Ctrl-Z).
If you run a program, it has its own namespace. You can import a module and change its attributes, but this will never change the module in another program.
If you want to have two programs, one running in the background all the time (e.g. with jobs, as proposed by Tyson) and one run from the command line, you need to communicate between those two processes.
Maybe there are other ways to circumvent the borders of processes, but I do not know them.
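To see the namespace point concretely, here is a small two-file illustration (the file names are hypothetical, not from the question):
# program_a.py (hypothetical)
import gb
gb.flag = True                            # changes gb only inside this process

# program_b.py (hypothetical) -- run as a separate process
import gb
print(getattr(gb, 'flag', 'not set'))     # prints 'not set'; program_a's change is invisible here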
Therefore I wrote the module below, which I can store values in. Every time a value is stored, the module's state is saved to disk.
'''
This is a module with persistent attributes.
The attributes of this module are shared across all programs that import it.
To set an attribute:
    import runningConfiguration
    runningConfiguration.x = y
To get an attribute:
    runningConfiguration.x
'''
import os
import types
import traceback

fn = fileName = os.path.splitext(__file__)[0] + '.conf'

class RunningConfiguration(types.ModuleType):
    fileName = fn

    def __init__(self, *args, **kw):
        types.ModuleType.__init__(self, *args, **kw)
        import sys
        sys.modules[__name__] = self
        self.load()

    def save(self):
        import pickle
        pickle.dump(self.__dict__, file(self.fileName, 'wb'))

    def load(self):
        import pickle
        try:
            dict = pickle.load(file(self.fileName, 'rb'))
        except EOFError:
            pass
        except:
            traceback.print_exc()
        else:
            self.__dict__.update(dict)

    def __setattr__(self, name, value):
        ## print 'set', name, value,
        l = []
        v1 = self.__dict__.get(name, l)
        self.__dict__[name] = value
        try:
            self.save()
            ## print 'ok'
        except:
            if v1 is not l:
                self.__dict__[name] = v1
            raise

    def __getattribute__(self, name):
        if name in ('__dict__', '__class__', 'save', 'load', '__setattr__', '__delattr__', 'fileName'):
            return types.ModuleType.__getattribute__(self, name)
        ## print 'get', name
        self.load()
        l = []
        ret = self.__dict__.get(name, l)
        if ret is l:
            if hasattr(self.__class__, name):
                return getattr(self.__class__, name)
            if name in globals():
                return globals()[name]
            raise AttributeError('%s object has no attribute %r' % (self.__class__.__name__, name))
        return ret

    def __delattr__(self, name):
        del self.__dict__[name]
        self.save()

RunningConfiguration(__name__)
I saved it to runningConfiguration.py.
You can use it like this:
# program1
import runningConfiguration

if not hasattr(runningConfiguration, 'programs'):
    runningConfiguration.programs = []            # the attribute 'programs' is created
runningConfiguration.programs += ['program1']     # the list is changed and = is used -> the module is saved
This is an insecure module and not everything can be saved to it, but many things can.
Also, when two programs save at the same time, the first written values may get lost.
Try it out: import it from two different programs and see how it behaves.
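Applied back to the original question, the same module can carry a kill flag from the command line to the running threads. The sketch below is my own wiring, not code from the question (the -killAll handling and the loop condition are assumptions); it also drops the private _Thread__stop() hack in favor of a cooperative check:
# mod_keepOnline.py (sketch)
import sys
import threading
import time
import runningConfiguration    # the persistent-attribute module from above

class Class_putOnline(threading.Thread):
    def run(self):
        # loop until the persisted killAll flag appears on disk
        while not getattr(runningConfiguration, 'killAll', False):
            # ... login / sleep / logout as in the original run() ...
            time.sleep(1)
        # simply fall out of the loop instead of calling _Thread__stop()

if __name__ == '__main__':
    if '-killAll' in sys.argv:
        runningConfiguration.killAll = True    # persisted; the running process sees it
        sys.exit(0)
    runningConfiguration.killAll = False
    Class_putOnline().start()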

Related

Hitting "connection paused" fork issues with pymongo because I need to access the db for configuration before multiprocessing starts

I've been struggling with this for a couple of months now and have tried a lot of different things to alleviate it, but I'm not sure what to do anymore. All the examples I see are different from what I need, and in my case they just wouldn't work.
To preface the problem: I have processor applications that get spawned by a manager as Docker containers. The processor is a single class that runs in a forever while loop, processing the same list of items over and over again and running a function on them. The code I'm working with is quite large, so I created a smaller version of the problem below.
This is how I create my engine:
db.py
from os import getpid
from pymongo import MongoClient

_mongo_client = None
_mongo_client_pid = None

def get_mongodb_uri(MONGO_DB_HOST='localhost', MONGO_DB_PORT=27017) -> str:
    # defaults added so the zero-argument call below works
    return 'mongodb://{}:{}/{}'.format(MONGO_DB_HOST, MONGO_DB_PORT, 'taskprocessor')

def get_db_engine():
    # re-create the client if we are in a different process
    # than the one that created it
    global _mongo_client, _mongo_client_pid
    curr_pid = getpid()
    if curr_pid != _mongo_client_pid:
        _mongo_client = MongoClient(get_mongodb_uri(), connect=False)
        _mongo_client_pid = curr_pid
    return _mongo_client

def get_db(name):
    return get_db_engine()['taskprocessor'][name]
These are my DB models:
processor.py
from uuid import uuid4
from taskprocessor.db import get_db

class ProcessorModel():
    db = get_db("processors")

    def __init__(self, **kwargs):
        self.uid = kwargs.get('uid', str(uuid4()))
        self.exceptions = kwargs.get('exceptions', [])
        self.to_process = kwargs.get('to_process', [])
        self.functions = kwargs.get('functions', ["int", "round"])

    def save(self):
        return self.db.insert_one(self.__dict__).inserted_id is not None

    @classmethod
    def get(cls, uid):
        res = cls.db.find_one(dict(uid=uid))
        return ProcessorModel(**res)
result.py
from uuid import uuid4
from taskprocessor.db import get_db

class ResultModel():
    db = get_db("results")

    def __init__(self, **kwargs):
        self.uid = kwargs.get('uid', str(uuid4()))
        self.res = kwargs.get('res', dict())

    def save(self):
        return self.db.insert_one(self.__dict__).inserted_id is not None
And my main.py that gets started as a Docker container to run a forever loop:
import os
from time import sleep
from taskprocessor.db.processor import ProcessorModel
from taskprocessor.db.result import ResultModel
from multiprocessing import Pool

class Processor:
    def __init__(self):
        self.id = os.getenv("PROCESSOR_ID")
        self.db_model = ProcessorModel.get(self.id)
        self.to_process = self.db_model.to_process  # list of floats [1.23, 1.535, 1.33499, 242.2352, 352.232]
        self.functions = self.db_model.functions    # list, e.g. ["round", "int"]

    def run(self):
        while True:
            try:
                pool = Pool(2)
                res = list(pool.map(self.analyse, self.to_process))
                print(res)
                sleep(100)
            except Exception as e:
                self.db_model = ProcessorModel.get(os.getenv("PROCESSOR_ID"))
                self.db_model.exceptions.append(f"exception {e}")
                self.db_model.save()
                print("Exception")

    def analyse(self, item):
        res = {}
        for func in self.functions:
            if func == "round":
                res['round'] = round(item)
            if func == "int":
                res['int'] = int(item)
        ResultModel(res=res).save()
        return res

if __name__ == "__main__":
    p = Processor()
    p.run()
I've tried setting connect=False, and even tried closing the connection after the configuration, but then I end up with connection-closed errors. I also tried a system of recognizing the PID and handing out a different client, but that still did not help.
Almost all the examples I see are ones where DB access is not needed before the multiprocessing fork. In my case the initial configuration is heavy, and it would not be efficient to do it every single time in the process loop. Furthermore, the items to process themselves depend on data from the DB.
I can live with not being able to save the exceptions to the DB object from the main PID.
I'm seeing error logs around fork safety as well as hitting "connection pool paused" errors as a symptom of this issue.
If anybody sees this: I was using pymongo 4.0.2, upgraded to 4.3.3, and I'm no longer seeing the errors I was previously seeing.
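For reference, the pattern the PyMongo documentation recommends for fork safety is to create the client in each worker after the fork, for example via a Pool initializer. A minimal sketch (the URI and collection names are placeholders, not taken from the question):
from multiprocessing import Pool
from pymongo import MongoClient

_client = None    # one MongoClient per worker process

def init_worker():
    global _client
    _client = MongoClient('mongodb://localhost:27017/')    # placeholder URI

def work(item):
    # the client was created after the fork, so it is safe to use here
    _client['taskprocessor']['results'].insert_one({'res': item})
    return item

if __name__ == '__main__':
    pool = Pool(2, initializer=init_worker)
    print(pool.map(work, [1.23, 1.535, 1.33499]))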

Processing a file before starting the app and reacting to every change

I have a file containing some data – data.txt (in a known location). I would like the Django app to process this file before startup and to react to every change (without a restart). What is the best way to do it?
For startup, you can write a middleware that does what you want in __init__ and afterwards raises django.core.exceptions.MiddlewareNotUsed from the __init__, so Django will not use it for any request processing. docs
And the middleware's __init__ will be called at startup, not at the first request.
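A minimal sketch of that trick, using the old-style middleware this answer describes (process_data_file and settings.DATA_FILE_PATH are placeholders of mine):
# myapp/middleware.py (sketch)
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed

class StartupMiddleware(object):
    def __init__(self):
        # runs once, at startup
        process_data_file(settings.DATA_FILE_PATH)    # placeholder hook
        # tell Django to drop this middleware from request processing
        raise MiddlewareNotUsed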
As for reacting to file changes, you can use https://github.com/gorakhargosh/watchdog (an example of usage can be found here; a minimal sketch follows below).
So you can either start it somewhere in the middleware too, or, if it's only DB updates, you can create a separate script (or Django management command) that runs via supervisor or something like it, monitors this file, and updates the DB.
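A minimal watchdog sketch, assuming the file is data.txt in the current directory (the reaction itself is a placeholder):
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

class DataFileHandler(FileSystemEventHandler):
    def on_modified(self, event):
        if event.src_path.endswith('data.txt'):
            print('data.txt changed, re-processing')    # placeholder reaction

observer = Observer()
observer.schedule(DataFileHandler(), path='.', recursive=False)
observer.start()
try:
    while True:
        time.sleep(1)
finally:
    observer.stop()
    observer.join()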
An option could be pyinotify, which monitors the filesystem for changes; it works only on Linux, though.
Otherwise, look at the code of the runserver command, which seems to do the same thing (the code of the autoreload module is here).
To run a command before starting an app, I suppose you can put some code in the settings module.
Maybe you could put an object in settings that re-reads the file whenever it changes, i.e.:
make a class that loads the file and reloads it if the file has been modified:
class ExtraConfigWatcher(object):
    def __init__(self, file):
        self.file = file
        self.cached = dict()
        self.last_date_modified = None

    def update_config(self):
        """
        Update the config by reloading the file.
        """
        # has_been_modified() and get_dict_with_file() are pseudocode helpers
        # left to the reader (e.g. compare os.path.getmtime() timestamps).
        if has_been_modified(self.file, self.last_date_modified):
            # regenerate the config from the file
            self.cached = get_dict_with_file(self.file)
            self.last_date_modified = time.time()

    def __getitem__(self, *args, **kwargs):
        self.update_config()
        return self.cached.__getitem__(*args, **kwargs)

    def __setitem__(self, *args, **kwargs):
        raise NotImplementedError("you can't set config into this")
In settings.py, initialize this object:
EXTRA_CONFIG = ExtraConfigWatcher("path/to/the/file.dat")
In myapps/views.py, import settings and use EXTRA_CONFIG:
from django.conf import settings

def dosomething(request):
    if settings.EXTRA_CONFIG["the_data_from_the_file"] == "foo":
        # do something
A while ago I was trying to find a mechanism to "hot-swap" Python modules. While that is not exactly what you need, maybe you can use the implementation I proposed, monitoring your configuration file for modifications and acting accordingly.
The code I proposed is the following (I did not use inotify because I am working on an NFS file system):
import imp
import time
import hashlib
import threading
import logging

logger = logging.getLogger("")

class MonitorThread(threading.Thread):
    def __init__(self, engine, frequency=1):
        super(MonitorThread, self).__init__()
        self.engine = engine
        self.frequency = frequency
        # daemonize the thread so that it ends with the master program
        self.daemon = True

    def run(self):
        while True:
            with open(self.engine.source, "rb") as fp:
                fingerprint = hashlib.sha1(fp.read()).hexdigest()
            if not fingerprint == self.engine.fingerprint:
                self.engine.notify(fingerprint)
            time.sleep(self.frequency)

class Engine(object):
    def __init__(self, source):
        # store the path to the engine source
        self.source = source
        # load the module for the first time and create a fingerprint
        # for the file
        self.mod = imp.load_source("source", self.source)
        with open(self.source, "rb") as fp:
            self.fingerprint = hashlib.sha1(fp.read()).hexdigest()
        # turn on the monitoring thread
        monitor = MonitorThread(self)
        monitor.start()

    def notify(self, fingerprint):
        logger.info("received notification of fingerprint change ({0})".format(fingerprint))
        self.fingerprint = fingerprint
        self.mod = imp.load_source("source", self.source)

    def __getattr__(self, attr):
        return getattr(self.mod, attr)

def main():
    logging.basicConfig(level=logging.INFO, filename="hotswap.log")
    engine = Engine("engine.py")
    # this silly loop is a sample of how the program can be running in
    # one thread while the monitoring is performed in another.
    while True:
        engine.f1()
        engine.f2()
        time.sleep(1)

if __name__ == "__main__":
    main()
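For completeness, a tiny engine.py the sample above could hot-swap (hypothetical contents; edit f1/f2 while main() is running to watch the reload happen):
# engine.py (hypothetical)
def f1():
    print('f1 from engine.py')

def f2():
    print('f2 from engine.py')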

How to attach a debugger to a Python subprocess?

I need to debug a child process spawned by multiprocessing.Process(). The pdb debugger seems to be unaware of forking and unable to attach to already-running processes.
Are there any smarter Python debuggers that can be attached to a subprocess?
I've been searching for a simple solution to this problem and came up with this:
import sys
import pdb

class ForkedPdb(pdb.Pdb):
    """A Pdb subclass that may be used
    from a forked multiprocessing child
    """
    def interaction(self, *args, **kwargs):
        _stdin = sys.stdin
        try:
            sys.stdin = open('/dev/stdin')
            pdb.Pdb.interaction(self, *args, **kwargs)
        finally:
            sys.stdin = _stdin
Use it the same way you might use the classic Pdb:
ForkedPdb().set_trace()
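A runnable mini-example of the idea (the worker function is mine, not from the answer):
import multiprocessing

def worker():
    ForkedPdb().set_trace()    # drops into pdb inside the child process
    print('resumed in child')

if __name__ == '__main__':
    p = multiprocessing.Process(target=worker)
    p.start()
    p.join()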
Winpdb is pretty much the definition of a smarter Python debugger. It explicitly supports going down a fork; I'm not sure it works nicely with multiprocessing.Process(), but it's worth a try.
For a list of candidates to check for support of your use case, see the list of Python Debuggers in the wiki.
This is an elaboration of Romuald's answer which restores the original stdin using its file descriptor. This keeps readline working inside the debugger. Besides, pdb's special handling of KeyboardInterrupt is disabled so that it does not interfere with the multiprocessing SIGINT handler.
import os
import pdb
import sys

class ForkablePdb(pdb.Pdb):
    _original_stdin_fd = sys.stdin.fileno()
    _original_stdin = None

    def __init__(self):
        pdb.Pdb.__init__(self, nosigint=True)

    def _cmdloop(self):
        current_stdin = sys.stdin
        try:
            if not self._original_stdin:
                self._original_stdin = os.fdopen(self._original_stdin_fd)
            sys.stdin = self._original_stdin
            self.cmdloop()
        finally:
            sys.stdin = current_stdin
Building upon @memplex's idea, I had to modify it to get it to work with joblib by setting sys.stdin in the constructor as well as passing it along directly via joblib.
import os
import pdb
import signal
import sys
import joblib

_original_stdin_fd = None

class ForkablePdb(pdb.Pdb):
    _original_stdin = None
    _original_pid = os.getpid()

    def __init__(self):
        pdb.Pdb.__init__(self)
        if self._original_pid != os.getpid():
            if _original_stdin_fd is None:
                raise Exception("Must set ForkablePdb._original_stdin_fd to stdin fileno")
            self.current_stdin = sys.stdin
            if not self._original_stdin:
                self._original_stdin = os.fdopen(_original_stdin_fd)
            sys.stdin = self._original_stdin

    def _cmdloop(self):
        try:
            self.cmdloop()
        finally:
            sys.stdin = self.current_stdin

def handle_pdb(sig, frame):
    ForkablePdb().set_trace(frame)

def test(i, fileno):
    global _original_stdin_fd
    _original_stdin_fd = fileno
    while True:
        pass

if __name__ == '__main__':
    print "PID: %d" % os.getpid()
    signal.signal(signal.SIGUSR2, handle_pdb)
    ForkablePdb().set_trace()
    fileno = sys.stdin.fileno()
    joblib.Parallel(n_jobs=2)(joblib.delayed(test)(i, fileno) for i in range(10))
remote-pdb can be used to debug sub-processes. After installation, put the following lines in the code you need to debug:
import remote_pdb
remote_pdb.set_trace()
remote-pdb will print a port number that will accept a telnet connection for debugging that specific process. There are some caveats around worker launch order, where stdout goes when using various frontends, etc. To ensure a specific port is used (it must be free and accessible to the current user), use the following instead:
from remote_pdb import RemotePdb
RemotePdb('127.0.0.1', 4444).set_trace()
remote-pdb may also be launched via the breakpoint() function in Python 3.7+.
Just use PuDB, which gives you an awesome TUI (a GUI in the terminal) and supports multiprocessing as follows:
from pudb import forked; forked.set_trace()
An idea I had was to create "dummy" classes that fake the implementation of the methods you are using from multiprocessing:
from multiprocessing import Pool

class DummyPool():
    @staticmethod
    def apply_async(func, args, kwds):
        return DummyApplyResult(func(*args, **kwds))

    def close(self): pass
    def join(self): pass

class DummyApplyResult():
    def __init__(self, result):
        self.result = result

    def get(self):
        return self.result

def foo(a, b, switch):
    # set trace when DummyPool is used
    # import ipdb; ipdb.set_trace()
    if switch:
        return b - a
    else:
        return a - b

if __name__ == '__main__':
    pool = DummyPool()  # switch between Pool() and DummyPool() here
    results = []
    results.append(pool.apply_async(foo, args=(1, 100), kwds={'switch': True}))
    pool.close()
    pool.join()
    results[0].get()
Here is a version of ForkedPdb (Romuald's solution) which will work for both Windows and *nix based systems.
import sys
import pdb
import win32console

class MyHandle():
    def __init__(self):
        self.screenBuffer = win32console.GetStdHandle(win32console.STD_INPUT_HANDLE)

    def readline(self):
        return self.screenBuffer.ReadConsole(1000)

class ForkedPdb(pdb.Pdb):
    def interaction(self, *args, **kwargs):
        _stdin = sys.stdin
        try:
            if sys.platform == "win32":
                sys.stdin = MyHandle()
            else:
                sys.stdin = open('/dev/stdin')
            pdb.Pdb.interaction(self, *args, **kwargs)
        finally:
            sys.stdin = _stdin
The problem here is that Python always connects sys.stdin in the child process to os.devnull to avoid contention for the stream. But this means that when the debugger (or a simple input()) tries to read from stdin, it immediately reaches end-of-file and reports an error.
One solution, at least if you don't expect multiple debuggers to run at the same time, is to reopen stdin in the child process. That can be done by setting sys.stdin to open(0), which always opens the active terminal. This is in fact what the ForkedPdb solution does, but it can be done more simply and in an OS-independent manner like this:
import multiprocessing, sys

def main():
    process = multiprocessing.Process(target=worker)
    process.start()
    process.join()

def worker():
    # Python automatically closes sys.stdin for the subprocess, so we reopen
    # stdin. This enables pdb to connect to the terminal and accept commands.
    # See https://stackoverflow.com/a/30149635/3830997.
    sys.stdin = open(0)  # or os.fdopen(0)
    print("Hello from the subprocess.")
    breakpoint()  # or import pdb; pdb.set_trace()
    print("Exited from breakpoint in the subprocess.")

if __name__ == '__main__':
    main()
If you are on a supported platform, try DTrace. Most of the BSD / Solaris / OS X family supports DTrace.
Here is an intro by the author. You can use DTrace to debug just about anything.
Here is a SO post on learning DTrace.

How to capture frames from Apple iSight using Python and PyObjC?

I am trying to capture a single frame from the Apple iSight camera built into a MacBook Pro using Python (version 2.7 or 2.6) and PyObjC (version 2.2).
As a starting point, I used this old StackOverflow question. To verify that it made sense, I cross-referenced it against Apple's MyRecorder example that it seems to be based on. Unfortunately, my script does not work.
My big questions are:
Am I initializing the camera correctly?
Am I starting the event loop correctly?
Was there any other setup I was supposed to do?
In the example script pasted below, the intended operation is that after calling startImageCapture(), I should start seeing "Got a frame..." messages printed by the CaptureDelegate. However, the camera's light never turns on and the delegate's callback is never executed.
Also, there are no failures during startImageCapture(): all functions claim to succeed, and it successfully finds the iSight device. Analyzing the session object in pdb shows that it has valid input and output objects, the output has a delegate assigned, the device is not in use by another process, and the session is marked as running after startRunning() is called.
Here's the code:
#!/usr/bin/env python2.7

import sys
import os
import time
import objc
import QTKit
import AppKit
from Foundation import NSObject
from Foundation import NSTimer
from PyObjCTools import AppHelper

objc.setVerbose(True)

class CaptureDelegate(NSObject):
    def captureOutput_didOutputVideoFrame_withSampleBuffer_fromConnection_(self, captureOutput,
                                                                           videoFrame, sampleBuffer,
                                                                           connection):
        # This should get called for every captured frame
        print "Got a frame: %s" % videoFrame

class QuitClass(NSObject):
    def quitMainLoop_(self, aTimer):
        # Just stop the main loop.
        print "Quitting main loop."
        AppHelper.stopEventLoop()

def startImageCapture():
    error = None

    # Create a QT Capture session
    session = QTKit.QTCaptureSession.alloc().init()

    # Find iSight device and open it
    dev = QTKit.QTCaptureDevice.defaultInputDeviceWithMediaType_(QTKit.QTMediaTypeVideo)
    print "Device: %s" % dev
    if not dev.open_(error):
        print "Couldn't open capture device."
        return

    # Create an input instance with the device we found and add to session
    input = QTKit.QTCaptureDeviceInput.alloc().initWithDevice_(dev)
    if not session.addInput_error_(input, error):
        print "Couldn't add input device."
        return

    # Create an output instance with a delegate for callbacks and add to session
    output = QTKit.QTCaptureDecompressedVideoOutput.alloc().init()
    delegate = CaptureDelegate.alloc().init()
    output.setDelegate_(delegate)
    if not session.addOutput_error_(output, error):
        print "Failed to add output delegate."
        return

    # Start the capture
    print "Initiating capture..."
    session.startRunning()

def main():
    # Open camera and start capturing frames
    startImageCapture()

    # Set up a timer to quit in 10 seconds (hack for now)
    quitInst = QuitClass.alloc().init()
    NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(10.0,
                                                                             quitInst,
                                                                             'quitMainLoop:',
                                                                             None,
                                                                             False)
    # Start Cocoa's main event loop
    AppHelper.runConsoleEventLoop(installInterrupt=True)
    print "After event loop"

if __name__ == "__main__":
    main()
Thanks for any help you can provide!
OK, I spent a day diving through the depths of PyObjC and got it working.
For future reference, the reason the code in the question did not work: variable scope and garbage collection. The session variable was deleted when it fell out of scope, which happened before the event processor ran. Something must be done to retain it so it is not freed before it has time to run.
Moving everything into a class and keeping session as a member variable made the callbacks start working. Additionally, the code below demonstrates getting the frame's pixel data into bitmap format and saving it via Cocoa calls, and also how to copy it back into Python's world-view as a buffer or string.
The script below will capture a single frame:
#!/usr/bin/env python2.7
#
# camera.py -- by Trevor Bentley (02/04/2011)
#
# This work is licensed under a Creative Commons Attribution 3.0 Unported License.
#
# Run from the command line on an Apple laptop running OS X 10.6, this script will
# take a single frame capture using the built-in iSight camera and save it to disk
# using three methods.
#

import sys
import os
import time
import objc
import QTKit
from AppKit import *
from Foundation import NSObject
from Foundation import NSTimer
from PyObjCTools import AppHelper

class NSImageTest(NSObject):
    def init(self):
        self = super(NSImageTest, self).init()
        if self is None:
            return None

        self.session = None
        self.running = True
        return self

    def captureOutput_didOutputVideoFrame_withSampleBuffer_fromConnection_(self, captureOutput,
                                                                           videoFrame, sampleBuffer,
                                                                           connection):
        self.session.stopRunning()  # I just want one frame

        # Get a bitmap representation of the frame using CoreImage and Cocoa calls
        ciimage = CIImage.imageWithCVImageBuffer_(videoFrame)
        rep = NSCIImageRep.imageRepWithCIImage_(ciimage)
        bitrep = NSBitmapImageRep.alloc().initWithCIImage_(ciimage)
        bitdata = bitrep.representationUsingType_properties_(NSBMPFileType, objc.NULL)

        # Save image to disk using Cocoa
        t0 = time.time()
        bitdata.writeToFile_atomically_("grab.bmp", False)
        t1 = time.time()
        print "Cocoa saved in %.5f seconds" % (t1 - t0)

        # Save a read-only buffer of image to disk using Python
        t0 = time.time()
        bitbuf = bitdata.bytes()
        f = open("python.bmp", "w")
        f.write(bitbuf)
        f.close()
        t1 = time.time()
        print "Python saved buffer in %.5f seconds" % (t1 - t0)

        # Save a string-copy of the buffer to disk using Python
        t0 = time.time()
        bitbufstr = str(bitbuf)
        f = open("python2.bmp", "w")
        f.write(bitbufstr)
        f.close()
        t1 = time.time()
        print "Python saved string in %.5f seconds" % (t1 - t0)

        # Will exit on next execution of quitMainLoop_()
        self.running = False

    def quitMainLoop_(self, aTimer):
        # Stop the main loop after one frame is captured. Called rapidly by timer.
        if not self.running:
            AppHelper.stopEventLoop()

    def startImageCapture(self, aTimer):
        error = None
        print "Finding camera"

        # Create a QT Capture session
        self.session = QTKit.QTCaptureSession.alloc().init()

        # Find iSight device and open it
        dev = QTKit.QTCaptureDevice.defaultInputDeviceWithMediaType_(QTKit.QTMediaTypeVideo)
        print "Device: %s" % dev
        if not dev.open_(error):
            print "Couldn't open capture device."
            return

        # Create an input instance with the device we found and add to session
        input = QTKit.QTCaptureDeviceInput.alloc().initWithDevice_(dev)
        if not self.session.addInput_error_(input, error):
            print "Couldn't add input device."
            return

        # Create an output instance with a delegate for callbacks and add to session
        output = QTKit.QTCaptureDecompressedVideoOutput.alloc().init()
        output.setDelegate_(self)
        if not self.session.addOutput_error_(output, error):
            print "Failed to add output delegate."
            return

        # Start the capture
        print "Initiating capture..."
        self.session.startRunning()

    def main(self):
        # Callback that quits after a frame is captured
        NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(0.1,
                                                                                 self,
                                                                                 'quitMainLoop:',
                                                                                 None,
                                                                                 True)
        # Turn on the camera and start the capture
        self.startImageCapture(None)

        # Start Cocoa's main event loop
        AppHelper.runConsoleEventLoop(installInterrupt=True)
        print "Frame capture completed."

if __name__ == "__main__":
    test = NSImageTest.alloc().init()
    test.main()
QTKit is deprecated and PyObjC is a big dependency (and seems to be tricky to build if you want it in Homebrew). Plus, PyObjC did not have most of AVFoundation, so I created a simple camera extension for Python that uses AVFoundation to record a video or snap a picture. It requires no dependencies (Cython intermediate files are committed to avoid the need for Cython for most users).
It should be possible to build it like this:
pip install -e git+https://github.com/dashesy/pyavfcam.git
Then we can use it to take a picture:
import pyavfcam

# Open the default video source
cam = pyavfcam.AVFCam(sinks='image')
frame = cam.snap_picture('test.jpg')  # frame is a memory buffer; np.asarray(frame) can retrieve it
Not related to this question, but if the AVFCam class is subclassed, the overridden methods will be called with the result.

Creating a python win32 service

I am currently trying to create a win32 service using pywin32. My main point of reference has been this tutorial:
http://code.activestate.com/recipes/551780/
What I don't understand is the initialization process, since the Daemon is never initialized directly by Daemon(); instead, from my understanding, it's initialized by the following:
mydaemon = Daemon
__svc_regClass__(mydaemon, "foo", "foo display", "foo description")
__svc_install__(mydaemon)
where __svc_install__ handles the initialization by calling Daemon.__init__() and passing some arguments to it.
But how can I initialize the daemon object without initializing the service? I want to do a few things before I init the service. Does anyone have any ideas?
class Daemon(win32serviceutil.ServiceFramework):
    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)

    def SvcDoRun(self):
        self.run()

    def SvcStop(self):
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)

    def start(self):
        pass

    def stop(self):
        self.SvcStop()

    def run(self):
        pass

def __svc_install__(cls):
    win32api.SetConsoleCtrlHandler(lambda x: True, True)
    try:
        win32serviceutil.InstallService(
            cls._svc_reg_class_,
            cls._svc_name_,
            cls._svc_display_name_,
            startType=win32service.SERVICE_AUTO_START
        )
        print "Installed"
    except Exception, err:
        print str(err)

def __svc_regClass__(cls, name, display_name, description):
    # Bind the values to the service name
    cls._svc_name_ = name
    cls._svc_display_name_ = display_name
    cls._svc_description_ = description
    try:
        module_path = sys.modules[cls.__module__].__file__
    except AttributeError:
        from sys import executable
        module_path = executable
    module_file = os.path.splitext(os.path.abspath(module_path))[0]
    cls._svc_reg_class_ = '%s.%s' % (module_file, cls.__name__)
I just created a simple "how to" where the program is in one module and the service is in another place. It uses py2exe to create the win32 service, which I believe is the best you can do for users who don't want to mess with the Python interpreter or other dependencies.
You can check my tutorial here: Create win32 services using Python and py2exe
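For comparison, the stock pywin32 route skips custom registration helpers like __svc_regClass__/__svc_install__ entirely and lets win32serviceutil drive everything from the command line. A minimal sketch (it assumes _svc_name_ and _svc_display_name_ are set as class attributes on Daemon, which the question's code only binds at registration time):
if __name__ == '__main__':
    import win32serviceutil
    # handles: python thisfile.py install | start | stop | remove | debug
    win32serviceutil.HandleCommandLine(Daemon)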
I've never used these APIs, but digging through the code, it looks like the class passed in is used to register the name of the class in the registry, so you can't do any initialization of your own. But there's a method called GetServiceCustomOption that may help:
http://mail.python.org/pipermail/python-win32/2006-April/004518.html
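A short sketch of that approach (SetServiceCustomOption/GetServiceCustomOption are from win32serviceutil; the service name 'foo' and the option name are examples of mine, so treat the details as assumptions):
import win32serviceutil

# at install/configuration time, outside the running service:
win32serviceutil.SetServiceCustomOption('foo', 'my_setting', 'some value')

# later, inside the service (e.g. at the top of SvcDoRun):
value = win32serviceutil.GetServiceCustomOption('foo', 'my_setting', None)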
