subprocess.wait(timeout=15) is not working - python

Code:
import os
import subprocess
execs = ['C:\\Users\\XYZ\\PycharmProjects\\Task1\\dist\\Multiof2.exe', # --> Child 1
'C:\\Users\\XYZ\\PycharmProjects\\Task1\\dist\\Multiof5.exe', # --> Child 2
'C:\\Users\\XYZ\\PycharmProjects\\Task1\\dist\\Multiof10.exe',...] # --> Child 3 and more
print('Parent Process id : ', os.getpid())
process = [subprocess.Popen(exe) for exe in execs]
for proc in process:
    try:
        proc.wait(timeout=15)
        print('Child Process id : ', proc.pid)
        if proc.returncode == 0:
            print(proc.pid, 'Exited')
    except subprocess.TimeoutExpired:
        proc.terminate()
        print('Child Process with pid', proc.pid, 'is killed')
Some child processes take more than 15 seconds to execute, so I have to kill any process that times out. But proc.wait(timeout=15) is not raising an exception; instead it just lets each process run to completion.
I also tried [subprocess.Popen(exe, timeout=15) for exe in execs] but got
Error:
TypeError: __init__() got an unexpected keyword argument 'timeout'

You're not waiting for the processes in parallel - you're giving them each a sequential 15 seconds to do their thing.
You might want something like this to give all of them up to 15 seconds in parallel.
import os
import subprocess
import time
execs = [
"C:\\Users\\XYZ\\PycharmProjects\\Task1\\dist\\Multiof2.exe",
"C:\\Users\\XYZ\\PycharmProjects\\Task1\\dist\\Multiof5.exe",
"C:\\Users\\XYZ\\PycharmProjects\\Task1\\dist\\Multiof10.exe",
]
print("Parent Process id : ", os.getpid())
process = [subprocess.Popen(exe) for exe in execs]
start_time = time.time()
while True:
    elapsed_time = time.time() - start_time
    any_alive = False
    for proc in process:
        ret = proc.poll()
        if ret is None:  # still alive
            if elapsed_time >= 15:
                print("Killing:", proc.pid)
                proc.terminate()
            any_alive = True
    if not any_alive:
        break
    time.sleep(1)
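As an aside (not part of the original answer): the timeout keyword is accepted by the blocking calls Popen.wait(), Popen.communicate() and subprocess.run(), not by the Popen() constructor itself, which is what the TypeError above is complaining about. If you would rather keep wait(), a sketch that shares one 15 second deadline across all the children (reusing execs from the question) could look like this:

import subprocess
import time

DEADLINE = 15  # seconds shared by all children

process = [subprocess.Popen(exe) for exe in execs]
start = time.time()
for proc in process:
    # wait only for whatever is left of the shared deadline
    remaining = DEADLINE - (time.time() - start)
    try:
        proc.wait(timeout=max(remaining, 0))
        print(proc.pid, 'Exited with', proc.returncode)
    except subprocess.TimeoutExpired:
        proc.terminate()
        proc.wait()  # reap the terminated child
        print('Child Process with pid', proc.pid, 'is killed')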

Related

How am I using the multiprocessing (python) module wrong?

Can someone help me figure out why the following code won't run properly? I want to spawn new processes as the previous ones finish, but running this code automatically runs everything, i.e. all the jobs report finished and stopped when they aren't, and their windows are still open as well. Any thoughts on why is_alive() returns False when the job is actually still running?
import subprocess
import sys
import multiprocessing
import time
start_on = 33 #'!'
end_on = 34
num_processors = 4;
jobs = []
def createInstance():
    global start_on, end_on, jobs
    cmd = "python scrape.py" + " " + str(start_on) + " " + str(end_on)
    print cmd
    p = multiprocessing.Process(target=processCreator(cmd))
    jobs.append(p)
    p.start()
    start_on += 1
    end_on += 1
    print "length of jobs is: " + str(len(jobs))

def processCreator(cmd):
    subprocess.Popen(cmd, creationflags=subprocess.CREATE_NEW_CONSOLE)

if __name__ == '__main__':
    num_processors = input("How many instances to run simultaneously?: ")
    for i in range(num_processors):
        createInstance()
    while len(jobs) > 0:
        jobs = [job for job in jobs if job.is_alive()]
        for i in range(num_processors - len(jobs)):
            createInstance()
        time.sleep(1)
    print('*** All jobs finished ***')
Your code is spawning two processes on each createInstance() call, and I think that's what is messing up the is_alive() check.
p = multiprocessing.Process(target=processCreator(cmd))
Here target=processCreator(cmd) calls processCreator(cmd) right away in the parent, so subprocess.Popen(cmd, creationflags=subprocess.CREATE_NEW_CONSOLE) launches the console child immediately, and the multiprocessing.Process itself is created with a target of None. That wrapper process has nothing to do, so it exits almost at once, which is why is_alive() returns False even though the console window is still open.
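If you did want to keep multiprocessing, the construction itself (my sketch, not from the original answer) would have to pass the callable and its argument separately:

# calls processCreator(cmd) immediately in the parent; the Process gets target=None
p = multiprocessing.Process(target=processCreator(cmd))

# passes the callable; processCreator(cmd) now runs inside the child process
p = multiprocessing.Process(target=processCreator, args=(cmd,))

Note that processCreator would also need to block until the console process finishes (for example by calling .wait() on the Popen object), otherwise the wrapper process still exits almost immediately and you are back to the same symptom.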
I think this version will work, removing the use of multiprocessing. I have also changed the cmd definition (see the docs):
import subprocess
import sys
import time

start_on = 33  # '!'
end_on = 34
num_processors = 4
jobs = []

def createInstance():
    global start_on, end_on, jobs
    cmd = ["python", "scrape.py", str(start_on), str(end_on)]
    print(str(cmd))
    # Popen starts the command immediately; there is no .start() to call on it
    p = subprocess.Popen(cmd, creationflags=subprocess.CREATE_NEW_CONSOLE)
    jobs.append(p)
    start_on += 1
    end_on += 1
    print "length of jobs is: " + str(len(jobs))

if __name__ == '__main__':
    num_processors = input("How many instances to run simultaneously?: ")
    for i in range(num_processors):
        createInstance()
    while len(jobs) > 0:
        # poll() returns None while the process is still running
        jobs = [job for job in jobs if job.poll() is None]
        for i in range(num_processors - len(jobs)):
            createInstance()
        time.sleep(1)
    print('*** All jobs finished ***')

Why can the main process not exit when I use multiple processes in Python?

I expect all child processes to finish and then the main process to exit, but it cannot exit. Why?
#!/usr/bin/env python
# coding=utf-8
import os
from multiprocessing import Manager
from multiprocessing import Pool
def write_file_name_to_queue(q, src_folder):
    print('Process to write: %s' % os.getpid())
    if not os.path.exists(src_folder):
        print "Please input folder path"
        return
    for (dirpath, dirnames, filelist) in os.walk(src_folder):
        for name in filelist:
            if name[0] == '.':
                continue
            q.put(os.path.join(dirpath, name))

def read_file_name_from_queue(q):
    print('Process to read: %s' % os.getpid())
    while True:
        value = q.get(True)
        print('Get %s from queue.' % value)

if __name__ == "__main__":
    mg = Manager()
    q = mg.Queue()
    p = Pool()
    p.apply_async(func=write_file_name_to_queue, args=(q, "./test/"))
    for i in xrange(8):
        p.apply_async(func=read_file_name_from_queue, args=(q,))
    p.close()
    p.join()
Run it and get the following result:
➜ check python check_process.py
Process to write: 3918
Process to read: 3919
Process to read: 3920
Get ./test/a from queue.
Get ./test/b from queue.
Get ./test/c from queue.
Get ./test/e from queue.
Get ./test/f from queue.
Process to read: 3921
Process to read: 3918
The process still waits.
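(No answer was recorded here for this question. A likely cause, for what it's worth: each reader loops forever on q.get(True), so those pool tasks never return and p.join() blocks. One common fix is to have the writer push a sentinel per reader once it has finished walking the folder; the sketch below is illustrative, not from the thread.)

_SENTINEL = None  # marker meaning "no more file names"

def write_file_name_to_queue(q, src_folder, reader_count):
    for (dirpath, dirnames, filelist) in os.walk(src_folder):
        for name in filelist:
            if name[0] == '.':
                continue
            q.put(os.path.join(dirpath, name))
    for _ in range(reader_count):  # one sentinel per reader
        q.put(_SENTINEL)

def read_file_name_from_queue(q):
    while True:
        value = q.get(True)
        if value is _SENTINEL:  # writer is done, stop consuming
            break
        print('Get %s from queue.' % value)

With the writer submitted as write_file_name_to_queue(q, "./test/", 8), every reader eventually sees a sentinel, all tasks finish, and p.close(); p.join() returns.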

Processes not joining even after flushing their queues

I wrote a program using the module multiprocessing which globally executes as follows:
both a simulation and a UI process are started.
the simulation process feeds a queue with new simulation states. If the queue is full, the simulation loop isn't blocked so it can handle possible incoming messages.
the ui process consumes the simulation queue.
after around 1 second of execution time, the ui process sends a quit event to the main process then exits the loop. Upon exiting it sends a stopped event to the main process through the _create_process()'s inner wrapper() function.
the main process receives both events in whichever order. The quit event results in the main process sending stop signals to all the child processes, while the stopped event increments a counter in the main loop which will cause it to exit after having received as many stopped events as there are processes.
the simulation process receives the stop event and exits the loop, sending in turn a stopped event to the main process.
the main process has now received 2 stopped events in total and concludes that all child processes are stopped, or on their way to be. As a result, the main loop is exited.
the run() function flushes the queues which have been written by the child processes.
the child processes are being joined.
The problem is that quite often (but not always) the program will hang upon trying to join the simulation process, as per the log below.
[...]
[INFO/ui] process exiting with exitcode 0
[DEBUG/MainProcess] starting thread to feed data to pipe
[DEBUG/MainProcess] ... done self._thread.start()
[DEBUG/simulation] Queue._start_thread()
[DEBUG/simulation] doing self._thread.start()
[DEBUG/simulation] starting thread to feed data to pipe
[DEBUG/simulation] ... done self._thread.start()
[DEBUG/simulation] telling queue thread to quit
[DEBUG/MainProcess] all child processes (2) should have been stopped!
[INFO/simulation] process shutting down
[DEBUG/simulation] running all "atexit" finalizers with priority >= 0
[DEBUG/simulation] telling queue thread to quit
[DEBUG/simulation] running the remaining "atexit" finalizers
[DEBUG/simulation] joining queue thread
[DEBUG/MainProcess] joining process <Process(simulation, started)>
[DEBUG/simulation] feeder thread got sentinel -- exiting
[DEBUG/simulation] ... queue thread joined
[DEBUG/simulation] joining queue thread
Stopping the execution through a Ctrl + C in the shell results in these mangled tracebacks:
Process simulation:
Traceback (most recent call last):
Traceback (most recent call last):
File "./debug.py", line 224, in <module>
run()
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/process.py", line 257, in _bootstrap
util._exit_function()
File "./debug.py", line 92, in run
process.join() #< This doesn't work.
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/util.py", line 312, in _exit_function
_run_finalizers()
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/process.py", line 121, in join
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/util.py", line 252, in _run_finalizers
finalizer()
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/util.py", line 185, in __call__
res = self._callback(*self._args, **self._kwargs)
res = self._popen.wait(timeout)
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/popen_fork.py", line 54, in wait
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/queues.py", line 196, in _finalize_join
thread.join()
return self.poll(os.WNOHANG if timeout == 0.0 else 0)
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/multiprocessing/popen_fork.py", line 30, in poll
pid, sts = os.waitpid(self.pid, flag)
KeyboardInterrupt
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/threading.py", line 1060, in join
self._wait_for_tstate_lock()
File "/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/threading.py", line 1076, in _wait_for_tstate_lock
elif lock.acquire(block, timeout):
KeyboardInterrupt
As for the code, here is a stripped down version of it (hence why it often seems incomplete):
#!/usr/bin/env python3
import logging
import multiprocessing
import pickle
import queue
import time
from collections import namedtuple
_LOGGER = multiprocessing.log_to_stderr()
_LOGGER.setLevel(logging.DEBUG)
_BUFFER_SIZE = 4
_DATA_LENGTH = 2 ** 12
_STATUS_SUCCESS = 0
_STATUS_FAILURE = 1
_EVENT_ERROR = 0
_EVENT_QUIT = 1
_EVENT_STOPPED = 2
_MESSAGE_STOP = 0
_MESSAGE_EVENT = 1
_MESSAGE_SIMULATION_UPDATE = 2
_Message = namedtuple('_Message', ('type', 'value',))
_StopMessage = namedtuple('_StopMessage', ())
_EventMessage = namedtuple('_EventMessage', ('type', 'value',))
_SimulationUpdateMessage = namedtuple('_SimulationUpdateMessage', ('state',))
_MESSAGE_STRUCTS = {
_MESSAGE_STOP: _StopMessage,
_MESSAGE_EVENT: _EventMessage,
_MESSAGE_SIMULATION_UPDATE: _SimulationUpdateMessage
}
def run():
    # Messages from the main process to the child ones.
    downward_queue = multiprocessing.Queue()
    # Messages from the child processes to the main one.
    upward_queue = multiprocessing.Queue()
    # Messages from the simulation process to the UI one.
    simulation_to_ui_queue = multiprocessing.Queue(maxsize=_BUFFER_SIZE)
    # Regroup all the queues that can be written by child processes.
    child_process_queues = (upward_queue, simulation_to_ui_queue,)

    processes = (
        _create_process(
            _simulation,
            upward_queue,
            name='simulation',
            args=(
                simulation_to_ui_queue,
                downward_queue
            )
        ),
        _create_process(
            _ui,
            upward_queue,
            name='ui',
            args=(
                upward_queue,
                simulation_to_ui_queue,
                downward_queue
            )
        )
    )

    try:
        for process in processes:
            process.start()
        _main(downward_queue, upward_queue, len(processes))
    finally:
        # while True:
        #     alive_processes = tuple(process for process in processes
        #                             if process.is_alive())
        #     if not alive_processes:
        #         break
        #     _LOGGER.debug("processes still alive: %s" % (alive_processes,))

        for q in child_process_queues:
            _flush_queue(q)

        for process in processes:
            _LOGGER.debug("joining process %s" % process)
            # process.terminate()  #< This works!
            process.join()  #< This doesn't work.

def _main(downward_queue, upward_queue, process_count):
    try:
        stopped_count = 0
        while True:
            message = _receive_message(upward_queue, False)
            if message is not None and message.type == _MESSAGE_EVENT:
                event_type = message.value.type
                if event_type in (_EVENT_QUIT, _EVENT_ERROR):
                    break
                elif event_type == _EVENT_STOPPED:
                    stopped_count += 1
                    if stopped_count >= process_count:
                        break
    finally:
        # Whatever happens, make sure that all child processes have stopped.
        if stopped_count >= process_count:
            return

        # Send a 'stop' signal to all the child processes.
        for _ in range(process_count):
            _send_message(downward_queue, True, _MESSAGE_STOP)

        while True:
            message = _receive_message(upward_queue, False)
            if (message is not None
                    and message.type == _MESSAGE_EVENT
                    and message.value.type == _EVENT_STOPPED):
                stopped_count += 1
                if stopped_count >= process_count:
                    _LOGGER.debug(
                        "all child processes (%d) should have been stopped!"
                        % stopped_count
                    )
                    break

def _simulation(simulation_to_ui_queue, downward_queue):
    simulation_state = [i * 0.123 for i in range(_DATA_LENGTH)]

    # When the queue is full (possibly from reaching _BUFFER_SIZE), the next
    # solve is computed and kept around until the queue is being consumed.
    next_solve_message = None

    while True:
        message = _receive_message(downward_queue, False)
        if message is not None and message.type == _MESSAGE_STOP:
            break

        if next_solve_message is None:
            # _step(simulation_state)

            # Somehow the copy (pickle) seems to increase the chances for
            # the issue to happen.
            next_solve_message = _SimulationUpdateMessage(
                state=pickle.dumps(simulation_state)
            )

        status = _send_message(simulation_to_ui_queue, False,
                               _MESSAGE_SIMULATION_UPDATE,
                               **next_solve_message._asdict())
        if status == _STATUS_SUCCESS:
            next_solve_message = None

def _ui(upward_queue, simulation_to_ui_queue, downward_queue):
    time_start = -1.0
    previous_time = 0.0

    while True:
        message = _receive_message(downward_queue, False)
        if message is not None and message.type == _MESSAGE_STOP:
            break

        if time_start < 0:
            current_time = 0.0
            time_start = time.perf_counter()
        else:
            current_time = time.perf_counter() - time_start

        message = _receive_message(simulation_to_ui_queue, False)

        if current_time > 1.0:
            _LOGGER.debug("asking to quit")
            _send_message(upward_queue, True, _MESSAGE_EVENT,
                          type=_EVENT_QUIT, value=None)
            break

        previous_time = current_time

def _create_process(target, upward_queue, name='', args=None):
    def wrapper(function, upward_queue, *args, **kwargs):
        try:
            function(*args, **kwargs)
        except Exception:
            _send_message(upward_queue, True, _MESSAGE_EVENT,
                          type=_EVENT_ERROR, value=None)
        finally:
            _send_message(upward_queue, True, _MESSAGE_EVENT,
                          type=_EVENT_STOPPED, value=None)
            upward_queue.close()

    process = multiprocessing.Process(
        target=wrapper,
        name=name,
        args=(target, upward_queue) + args,
        kwargs={}
    )
    return process

def _receive_message(q, block):
    try:
        message = q.get(block=block)
    except queue.Empty:
        return None
    return message

def _send_message(q, block, message_type, **kwargs):
    message_value = _MESSAGE_STRUCTS[message_type](**kwargs)
    try:
        q.put(_Message(type=message_type, value=message_value), block=block)
    except queue.Full:
        return _STATUS_FAILURE
    return _STATUS_SUCCESS

def _flush_queue(q):
    try:
        while True:
            q.get(block=False)
    except queue.Empty:
        pass

if __name__ == '__main__':
    run()
Related questions on StackOverflow and hints in Python's doc basically boil down to needing to flush the queues before joining the processes, which I believe I've been trying to do here. I realize that the simulation queue could still be trying to push the (potentially large) buffered data onto the pipe by the time the program would try to flush them upon exiting, and thus ending up with still non-empty queues. This is why I tried to ensure that all the child processes were stopped before reaching this point. Now, looking at the log above and at the additional log outputted after uncommenting the while True loop checking for alive processes, it appears that the simulation process simply doesn't want to completely shut down even though its target function definitely exited. Could this be the reason of my problem?
If so, how am I supposed to deal with it cleanly? Otherwise, what am I missing here?
Tested with Python 3.4 on Mac OS X 10.9.5.
PS: I'm wondering if this couldn't be related to this bug?
Sounds like the issue was indeed due to some delay in pushing the data through the queue, causing the flushes to be ineffective because they fired too early.
A simple while process.is_alive(): flush_the_queues() seems to do the trick!
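Concretely (my sketch, reusing the names from run() above), the join loop at the end of run() becomes:

for process in processes:
    _LOGGER.debug("joining process %s" % process)
    # keep draining the queues written by the children until their feeder
    # threads have pushed everything through and the process can actually exit
    while process.is_alive():
        for q in child_process_queues:
            _flush_queue(q)
    process.join()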
Lately I ran into a use case similar to yours: multiple processes (up to 11), one input queue, one output queue, but a very heavy output queue.
I was getting an overhead of up to 5 seconds (!) using your suggestion to perform while process.is_alive(): flush_the_queues() before the process.join().
I've reduced that overhead down to 0.7 seconds by relying on a multiprocessing.Manager.list instead of a multiprocessing.Queue for the output queue. The multiprocessing.Manager.list doesn't need any flushing. I might also consider finding an alternative to the input queue if I can.
Full example here:
import multiprocessing
import queue
import time
PROCESSES = multiprocessing.cpu_count() - 1
processes = []
def run():
    start = time.time()
    input_queue = multiprocessing.Queue()
    _feed_input_queue(input_queue)
    with multiprocessing.Manager() as manager:
        output_list = manager.list()
        for _ in range(PROCESSES):
            p = multiprocessing.Process(target=_execute, args=(input_queue, output_list))
            processes.append(p)
            p.start()
        print(f"Time to process = {time.time() - start:.10f}")
        start = time.time()
        for p in processes:
            while p.is_alive():  # in principle we could get rid of this if we find an alternative to the output queue
                _flush_queue(input_queue)
            p.join()
        print(f"Time to join = {time.time() - start:.10f}")
        # from here you can do something with the output_list

def _feed_input_queue(input_queue):
    for i in range(10000):
        input_queue.put(i)

def _execute(input_queue: multiprocessing.Queue, output_list: list):
    while not input_queue.empty():
        input_item = input_queue.get()
        output_list.append(do_and_return_something_heavy(input_item))
    return True

def _flush_queue(q):
    try:
        while True:
            q.get(block=False)
    except queue.Empty:
        pass

def do_and_return_something_heavy(input_item):
    return str(input_item) * 100000

if __name__ == '__main__':
    run()
Output
Time to process = 0.1855618954
Time to join = 0.6889970303
Tested on Python 3.6.

kill threads after a time in python

I have a Python script with threads, and I need the following: if, after for example 1 hour, the threads are not finished, finish all the threads and end the script; but if the hour is not over, wait for all my threads to finish.
I tried a daemon thread that sleeps for the hour and then calls sys.exit(), but it does not work for me: the script still waits until the worker threads finish, so the sys.exit() has no effect.
import socket, threading, time, sys
from sys import argv
import os
acc_time=0
transactions_ps=5
ins = open(sys.argv[1],'r')
msisdn_list = []
for line in ins:
    msisdn_list.append(line.strip('\n'))
    # print line
ins.close()

def worker(msisdn_list):
    semaphore.acquire()
    global transactions_ps
    print " ***** ", threading.currentThread().getName(), "Launched"
    count = 1
    acc_time = 0
    print "len: ", len(msisdn_list)
    for i in msisdn_list:
        try:
            init = time.time()
            time.sleep(2)
            print "sleeping...", i
            time.sleep(4)
            final = time.time()
            acc_time = acc_time + final - init
            print acc_time
        except IOError:
            print "Connection failed", sys.exc_info()[0]
    print "Stopping ", threading.currentThread().getName()
    semaphore.release()

def kill_process(secs_to_die):
    time.sleep(secs_to_die)
    sys.exit()

seconds_to_die = 3600
thread_kill = threading.Thread(target=kill_process, args=(seconds_to_die,))
thread_kill.start()

max_con = 5
semaphore = threading.BoundedSemaphore(max_con)
for i in range(0, 28, transactions_ps):
    w = threading.Thread(target=worker, args=(msisdn_list[i:i + transactions_ps - 1],))
    w.setDaemon(True)
    w.start()
How can I do this?
A minimal change to your code that would fix the issue is to use threading.Barrier:
barrier = Barrier(number_of_threads, timeout=3600)
# create (number_of_threads - 1) threads, pass them barrier
# each thread calls barrier.wait() on exit
barrier.wait() # after number_of_threads .wait() calls or on timeout it returns
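A minimal runnable sketch of that idea (my example, with dummy workers rather than the asker's code; in real use the timeout would be 3600):

import threading
import time

NUM_THREADS = 5  # 4 workers plus the main thread
barrier = threading.Barrier(NUM_THREADS, timeout=10)

def worker(n):
    time.sleep(n)        # stand-in for real work
    try:
        barrier.wait()   # signal "I'm done"
    except threading.BrokenBarrierError:
        pass             # the barrier timed out before everyone arrived

for n in range(NUM_THREADS - 1):
    threading.Thread(target=worker, args=(n,), daemon=True).start()

try:
    barrier.wait()       # returns once all threads arrived, raises on timeout
    print("all threads finished in time")
except threading.BrokenBarrierError:
    print("timeout: exiting without waiting any further")

Because the worker threads are daemonic, the script exits as soon as the main thread falls off the end after the timeout.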
A simpler alternative is to use multiprocessing.dummy.Pool that creates daemon threads:
from multiprocessing.dummy import Pool # use threads
start = timer()
endtime = start + 3600
for result in pool.imap_unordered(work, args):
    if timer() > endtime:
        exit("timeout")
The code doesn't time out until a work item is done, i.e., it expects that processing a single item from the list doesn't take long.
Complete example:
#!/usr/bin/env python3
import logging
import multiprocessing as mp
from multiprocessing.dummy import Pool
from time import monotonic as timer, sleep
info = mp.get_logger().info
def work(i):
info("start %d", i)
sleep(1)
info("end %d", i)
seconds_to_die = 3600
max_con = 5
mp.log_to_stderr().setLevel(logging.INFO) # enable logging
pool = Pool(max_con) # no more than max_con at a time
start = timer()
endtime = start + seconds_to_die
for _ in pool.imap_unordered(work, range(10000)):
    if timer() > endtime:
        exit("timeout")
You may refer to this implementation of KThread:
http://python.todaysummary.com/q_python_45717.html

Waiting for subprocess execution in a multiprocessing Pool

I'm starting a pool of workers and submitting jobs to this pool. Each process creates a subprocess with a browser, waits for the page to load and then takes a screenshot. Sometimes Opera shows a crash dialog for an incorrectly terminated session. To avoid this I kill the tab through xkill and wait for the browser to terminate. Now I need correct handling for the SIGTERM signal. After the signal is caught and handled in the sig_handler function, I prevent submission of new jobs with pool.close() and wait for pool termination with pool.join(). When the pool is not running any subprocesses, the main process terminates normally, but when the pool has a subprocess, all worker processes terminate without waiting for the browser to terminate. How can I terminate my main process normally?
#!/usr/bin/env python
#
# http://bugs.python.org/issue6766 in functions manager data packed by pickle
#
import redis
import pickle
import getopt
import time
import logging
import os
import sys
import pwd
import subprocess
import re
import urllib2
import signal
import multiprocessing
import httplib
# Define regexps
xvfb_reg = re.compile(r'Xvfb :(\d+)')
browser_reg = re.compile(r'0x(\d+) .* \("opera" "Opera"\) 1024x768')
running = True
def sig_handler(signum, frame):
"""
Set termination flag
"""
global running
running = False
return
def check_url_code(url):
"""
Try fetch url before processing.
Return True if returned request code is 200 OK else False
"""
try:
url = urllib2.urlopen(url)
code = url.getcode()
if code == 200:
return True
else:
return False
except (urllib2.URLError, httplib.InvalidURL, ValueError):
return False
def list_display():
"""
Get working virtual framebuffers
"""
proc = subprocess.Popen(['/bin/ps', 'ax'], stdout=subprocess.PIPE)
return xvfb_reg.findall(proc.communicate()[0])
def get_display(queue, lock):
"""
Get display for opera instance.
"""
while True:
lock.acquire()
_queue = pickle.loads(queue['q'])
free = list(set(_queue['displays']).difference(_queue['locked_displays']))
if len(free):
_queue['locked_displays'].append(free[0])
queue['q'] = pickle.dumps(_queue)
lock.release()
return free[0]
lock.release()
time.sleep(3)
def get_screenshot(data, display):
"""
Fork background opera process and then search window with url.
Wait for 30 seconds and take screenshot of the window.
xkill killing opera window, cuz without opened tabs opera will be terminated.
"""
try:
os.remove('.opera/{0}/sessions/autosave.win'.format(display))
except:
pass
proc = subprocess.Popen(['/usr/bin/opera', '-geometry', '1024x768+0+0', '-fullscreen', '-display', ':{0}'.format(display), '-pd', '.opera/{0}'.format(display), data['url']])
time.sleep(10)
if int(data['size']) == 120:
geometry = '120x90'
elif int(data['size']) == 240:
geometry = '240x151'
elif int(data['size']) == 400:
geometry = '400x300'
try:
os.makedirs(data['path'])
except OSError:
pass
xwin_proc = subprocess.Popen(['/usr/bin/xwininfo', '-display', ':{0}'.format(display), '-root', '-tree'], stdout=subprocess.PIPE)
xwin_info = xwin_proc.communicate()[0]
window = browser_reg.findall(xwin_info)[0]
time.sleep(5)
pimport = subprocess.Popen(['/usr/bin/import', '-display', ':{0}'.format(display), '-window', 'root', '-resize', geometry, data['file']], stdout=subprocess.PIPE)
pimport.wait()
logging.info('Screenshot {0} for {1}: display={2}, window=0x{3}, file={4}'.format(geometry, data['url'], display, window, data['file']))
pxkill = subprocess.Popen(['/usr/bin/xkill', '-display', ':{0}'.format(display), '-id', '0x{0}'.format(window)])
proc.wait()
def worker_process(data, display, lock, connection, queue):
"""
Return data for callback function for freeing display and url
"""
get_screenshot(data, display)
lock.acquire()
_queue = pickle.loads(queue['q'])
_queue['locked_displays'].remove(display)
queue['q'] = pickle.dumps(_queue)
lock.release()
connection.hdel('jobs', data['md5_url'])
connection.hincrby('stats', 'completed', 1)
return
def main(pool, queue, lock, connection, job):
"""
Checking for file has been created early in another queue, url and url locks
"""
data = pickle.loads(job)
if os.path.isfile(data['path']):
connection.hdel('jobs', data['md5_url'])
return
lock.acquire()
_queue = pickle.loads(queue['q'])
if not check_url_code(data['url']):
logging.error('Error fetching {0}'.format(data['url']))
lock.release()
connection.hdel('jobs', data['md5_url'])
return
lock.release()
display = get_display(queue, lock)
pool.apply_async(worker_process, args = (data, display, lock, connection, queue))
def create_daemon(home):
try:
pid = os.fork()
except OSError:
sys.exit('Can not demonize process')
if pid == 0:
os.setsid()
try:
pid = os.fork()
except OSError:
sys.exit('Can not demonize process')
if pid == 0:
os.chdir(home)
os.umask(0)
else:
os._exit(0)
else:
os._exit(0)
import resource
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = 1024
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError:
pass
if hasattr(os, 'devnull'):
console = os.devnull
else:
console = '/dev/null'
os.open(console, os.O_RDWR)
os.dup2(0, 1)
os.dup2(0, 2)
return (0)
def help():
print """
Usage: {0} --u screenshot -l /var/log/screenshot/server.log -p /var/run/screenshot.pid
--user Set unprivileged user for process. This user can't be nobody, because script
-u reads home directory from passwd and uses it for Chrome user data dirs.
--log Set log file.
-l
--pid Set pid file.
-p
--help This help.
-h
""".format(sys.argv[0])
if __name__ == '__main__':
log_file = '/var/log/screenshot/server.log'
pid_file = '/var/run/screenshot.pid'
user = None
try:
opts, args = getopt.getopt(sys.argv[1:], 'l:p:u:h', ['log', 'pid', 'user', 'help'])
except getopt.GetoptError:
help()
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
help()
sys.exit()
elif opt in ('-l', '--log'):
log_file = arg
elif opt in ('-p', '--pid'):
pid_file = arg
elif opt in ('-u', '--user'):
user = arg
if user:
if not os.geteuid() == 0:
sys.exit('You need root privileges to set user')
try:
userl = pwd.getpwnam(user)
uid = userl.pw_uid
home = userl.pw_dir
except KeyError:
sys.exit('User {0} does not exist'.format(user))
os.setuid(uid)
os.chdir(home)
else:
sys.exit('You must set user')
# Fork child process for demonization
retval = create_daemon(home)
# Write pid to pidfile
pid = os.getpid()
open(pid_file, 'w').write(str(pid))
# Open logfile
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename=log_file)
logging.info('Starting server with pid {0}'.format(os.getpid()))
#
# Get working displays and start subprocesses
displays = list_display()
logging.info('Found displays: {0}'.format(' '.join(displays)))
pool = multiprocessing.Pool(processes=len(displays))
queue = multiprocessing.Manager().dict()
queue['q'] = pickle.dumps({
'displays' : displays,
'termination' : False,
'locked_displays' : []})
lock = multiprocessing.Manager().Lock()
connection = redis.Redis('localhost')
# Handle termination signals
signal.signal(signal.SIGTERM, sig_handler)
while running:
job = connection.lpop('high_priority')
if job is None:
job = connection.rpop('low_priority')
if not job is None:
main(pool, queue, lock, connection, job)
else:
time.sleep(5)
logging.info('Server stopped')
pool.close()
pool.join()
os._exit(0)
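(No answer was recorded here for this question. For what it's worth, one commonly used approach, sketched below and not taken from the thread, is to make the pool workers ignore SIGTERM via the Pool initializer, so that a signal delivered to the whole process group only triggers the parent's sig_handler, while pool.close() and pool.join() then wait for the outstanding worker_process calls, which themselves wait on the browser via proc.wait().)

import signal
import multiprocessing

def _init_worker():
    # Workers inherit the parent's signal handling; make them ignore SIGTERM
    # so a signal sent to the process group does not kill them mid-screenshot.
    signal.signal(signal.SIGTERM, signal.SIG_IGN)

pool = multiprocessing.Pool(processes=len(displays), initializer=_init_worker)

# ... same job loop as above ...

# on shutdown, stop accepting jobs and wait for the running ones to finish
pool.close()
pool.join()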
