When I run the Python script (BootScript.py) from the shell it runs properly, but when I try to run it through another script (automation.py) it gets stuck.
# automation.py
#!/usr/bin/env python
import sys
import optparse
import subprocess

global flag
failcount = 0

def incrfailcount():
    global failcount
    failcount += 1

def readfile():
    fp = open('BootStrap.log', 'r')
    print "press any key"
    #_input()
    for l in fp.readlines():
        if "BOOTSCRIPT SCORE IS: 3010" in l:
            #import pdb
            #pdb.set_trace()
            global flag
            flag = 0
    fp.close()

parser = optparse.OptionParser()
parser.add_option('-c', '--count', dest='counter', help='no of time reboot Should Happen')
(options, args) = parser.parse_args()
#counter = 1
if options.counter is None:
    counter = 1
else:
    counter = options.counter

count = 0
output = ""
mylist = [' --cfgfile="BDXT0_PO_0.cfg"', ' --cfgfile="BDXT0_PO_OVR_0.cfg"', ' --scbypass', ' --dmipy="C:\\sfd\\jg\\kdg\\dmi_pcie_po.py"', ' --fusestr="IA_CORE_DISABLE=0y111111111111111111111110"', ' --fusestr="HT_DIS=1"', ' --earbreakpy="C:\\dvfdfv\\dskf\\lsvcd\\config_restart.py"']
logfile = open('BootStrap.log', 'w')
#if out.__contains__('3010'):
#    break
for i in range(int(counter)):
    global flag
    flag = 1
    logfile = open('BootStrap.log', 'w')
    proc = subprocess.Popen(['python', 'bdxBootScript.py', mylist], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    for line in proc.stdout:
        sys.stdout.write(line)
        logfile.write(line)
    proc.wait()
    count = count + 1
    print "file closing "
    logfile.close()
    readfile()
    #global flag
    if flag:
        incrfailcount()
        continue

if flag:
    print "Error Occured in %d th iteration" % count
else:
    print "Every thing Went well"

if failcount >= 0:
    print "Script failed %d times of total run %d " % (failcount, count)
I am trying to automate BootScript.py.
**What the program does**
The script runs BootScript.py with arguments. The output of BootScript.py is checked for a specific line (BOOTSCRIPT SCORE IS: 3010).
If the line is present, the run is treated as a success, otherwise as a failure. This is repeated counter times.
**What I want**
This script gets stuck for a long time. I want it to execute without getting stuck, as if I were running BootScript.py manually.
There are several issues. For example, Popen(['python', 'bdxBootScript.py', mylist]) should raise an exception; you should use Popen(['python', 'bdxBootScript.py'] + mylist) instead. If you don't see the exception then either the code is not run (e.g., counter == 0) or, worse, exceptions are suppressed somewhere up the stack (don't do that; at the very least, log unexpected errors).
If bdxBootScript.py doesn't produce much output, then for line in proc.stdout: may appear to do nothing for some time. To fix it, pass the -u flag to python to make its output unbuffered, and use iter(p.stdout.readline, b'') to work around the "hidden read-ahead buffer" bug for pipes in Python 2:
import os
import sys
from subprocess import Popen, PIPE, STDOUT

with open(os.devnull, 'rb', 0) as DEVNULL:
    proc = Popen([sys.executable, '-u', 'bdxBootScript.py'] + mylist,
                 stdin=DEVNULL, stdout=PIPE, stderr=STDOUT, bufsize=1)
    for line in iter(proc.stdout.readline, b''):
        sys.stdout.write(line)
        sys.stdout.flush()
        logfile.write(line)
        logfile.flush()  # make the line available in the log immediately
    proc.stdout.close()
    rc = proc.wait()
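If you also want to detect the success marker while streaming the output, rather than re-reading BootStrap.log afterwards, a minimal sketch (the marker string comes from the question; success here is a stand-in for the original global flag):

success = False  # stand-in for the original global `flag`
for line in iter(proc.stdout.readline, b''):
    sys.stdout.write(line)
    logfile.write(line)
    if "BOOTSCRIPT SCORE IS: 3010" in line:
        success = True  # marker seen; no need to re-open the log later
proc.stdout.close()
rc = proc.wait()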
Related
I have this script; its purpose is to call another script with different parameters and print the output as it would be printed if I called it myself:
import subprocess

def run_this(command):
    print(f"running {command}")
    p = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    while True:
        retcode = p.poll()
        line = p.stdout.readline()
        if line:
            yield line
        if retcode is not None:
            print(f"retcode : {retcode}")
            p.stdout.close()
            break

def build_command(pruned_model, prompt):
    return f'python scripts/stable_txt2img.py --ddim_eta 0.0 --n_samples 1 --n_iter 4 --scale 7.0 ' \
        + f'--ddim_steps 50 --ckpt "{pruned_model}" ' \
        + f'--prompt "{prompt}" --seed 6514689'

pruned_model = r"C:\checkout2\Stable-diffusion\checkpoints\last-pruned.ckpt"
prompts = [
    "a person in space",
    "a person on a boat"
]

for prompt in prompts:
    print("iteration")
    command = build_command(pruned_model, prompt)
    run_this(command)
print("done")
However, the output is this:
iteration
iteration
done
Process finished with exit code 0
How is this possible? There is a print at the start of the run_this() function.
Thanks.
PS: you can pass any command to run_this(); it will never go into the function. For example, this will never print 'running toto':
import subprocess

def run_this(command):
    print(f"running {command}")
    p = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    while True:
        retcode = p.poll()
        line = p.stdout.readline()
        if line:
            yield line
        if retcode is not None:
            print(f"retcode : {retcode}")
            p.stdout.close()
            break

print("start")
run_this("toto")
print("done")
Your run_this is a generator function. Calling it doesn't actually run anything. It just creates a generator iterator. Iterating over the iterator would run the code.
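For example, to actually run the command and see its output, consume the generator. A minimal sketch (the yielded lines are bytes because stdout is a pipe, so they are decoded for display):

for line in run_this(command):
    print(line.decode(), end="")  # decode each bytes line and print it as it arrives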
I have a python subprocess that I'm trying to read output and error streams from. Currently I have it working, but I'm only able to read from stderr after I've finished reading from stdout. Here's what it looks like:
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

stdout_iterator = iter(process.stdout.readline, b"")
stderr_iterator = iter(process.stderr.readline, b"")

for line in stdout_iterator:
    # Do stuff with line
    print line

for line in stderr_iterator:
    # Do stuff with line
    print line
As you can see, the stderr for loop can't start until the stdout loop completes. How can I modify this to be able to read from both in the correct order the lines come in?
To clarify: I still need to be able to tell whether a line came from stdout or stderr because they will be treated differently in my code.
The code in your question may deadlock if the child process produces enough output on stderr (~100KB on my Linux machine).
There is a communicate() method that allows you to read from both stdout and stderr separately:
from subprocess import Popen, PIPE

process = Popen(command, stdout=PIPE, stderr=PIPE)
output, err = process.communicate()
If you need to read the streams while the child process is still running then the portable solution is to use threads (not tested):
from subprocess import Popen, PIPE
from threading import Thread
from Queue import Queue  # Python 2

def reader(pipe, queue):
    try:
        with pipe:
            for line in iter(pipe.readline, b''):
                queue.put((pipe, line))
    finally:
        queue.put(None)

process = Popen(command, stdout=PIPE, stderr=PIPE, bufsize=1)
q = Queue()
Thread(target=reader, args=[process.stdout, q]).start()
Thread(target=reader, args=[process.stderr, q]).start()
for _ in range(2):
    for source, line in iter(q.get, None):
        print "%s: %s" % (source, line),
See:
Python: read streaming input from subprocess.communicate()
Non-blocking read on a subprocess.PIPE in python
Python subprocess get children's output to file and terminal?
Here's a solution based on selectors, but one that preserves order, and streams variable-length characters (even single chars).
The trick is to use read1(), instead of read().
import selectors
import subprocess
import sys

p = subprocess.Popen(
    ["python", "random_out.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)

sel = selectors.DefaultSelector()
sel.register(p.stdout, selectors.EVENT_READ)
sel.register(p.stderr, selectors.EVENT_READ)

while True:
    for key, _ in sel.select():
        data = key.fileobj.read1().decode()
        if not data:
            exit()
        if key.fileobj is p.stdout:
            print(data, end="")
        else:
            print(data, end="", file=sys.stderr)
If you want a test program, use this.
import sys
from time import sleep

for i in range(10):
    print(f" x{i} ", file=sys.stderr, end="")
    sleep(0.1)
    print(f" y{i} ", end="")
    sleep(0.1)
The order in which a process writes data to different pipes is lost after write.
There is no way you can tell if stdout has been written before stderr.
You can try to read data simultaneously from multiple file descriptors in a non-blocking way
as soon as data is available, but this would only minimize the probability that the order is incorrect.
This program should demonstrate this:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import select
import subprocess

testapps = {
    'slow': '''
import os
import time
os.write(1, 'aaa')
time.sleep(0.01)
os.write(2, 'bbb')
time.sleep(0.01)
os.write(1, 'ccc')
''',
    'fast': '''
import os
os.write(1, 'aaa')
os.write(2, 'bbb')
os.write(1, 'ccc')
''',
    'fast2': '''
import os
os.write(1, 'aaa')
os.write(2, 'bbbbbbbbbbbbbbb')
os.write(1, 'ccc')
'''
}

def readfds(fds, maxread):
    while True:
        fdsin, _, _ = select.select(fds, [], [])
        for fd in fdsin:
            s = os.read(fd, maxread)
            if len(s) == 0:
                fds.remove(fd)
                continue
            yield fd, s
        if fds == []:
            break

def readfromapp(app, rounds=10, maxread=1024):
    f = open('testapp.py', 'w')
    f.write(testapps[app])
    f.close()

    results = {}
    for i in range(0, rounds):
        p = subprocess.Popen(['python', 'testapp.py'], stdout=subprocess.PIPE
                                                     , stderr=subprocess.PIPE)
        data = ''
        for (fd, s) in readfds([p.stdout.fileno(), p.stderr.fileno()], maxread):
            data = data + s
        results[data] = results[data] + 1 if data in results else 1

    print 'running %i rounds %s with maxread=%i' % (rounds, app, maxread)
    results = sorted(results.items(), key=lambda (k, v): k, reverse=False)
    for data, count in results:
        print '%03i x %s' % (count, data)

print
print "=> if output is produced slowly this should work as wished"
print "   and should return: aaabbbccc"
readfromapp('slow', rounds=100, maxread=1024)

print
print "=> now mostly aaacccbbb is returned, not as it should be"
readfromapp('fast', rounds=100, maxread=1024)

print
print "=> you could try to read data one by one, and return"
print "   e.g. a whole line only when LF is read"
print "   (b's should be finished before c's)"
readfromapp('fast', rounds=100, maxread=1)

print
print "=> but even this won't work ..."
readfromapp('fast2', rounds=100, maxread=1)
and outputs something like this:
=> if output is produced slowly this should work as wished
   and should return: aaabbbccc
running 100 rounds slow with maxread=1024
100 x aaabbbccc

=> now mostly aaacccbbb is returned, not as it should be
running 100 rounds fast with maxread=1024
006 x aaabbbccc
094 x aaacccbbb

=> you could try to read data one by one, and return
   e.g. a whole line only when LF is read
   (b's should be finished before c's)
running 100 rounds fast with maxread=1
003 x aaabbbccc
003 x aababcbcc
094 x abababccc

=> but even this won't work ...
running 100 rounds fast2 with maxread=1
003 x aaabbbbbbbbbbbbbbbccc
001 x aaacbcbcbbbbbbbbbbbbb
008 x aababcbcbcbbbbbbbbbbb
088 x abababcbcbcbbbbbbbbbb
This works for Python 3 (3.6):

import selectors
import subprocess
import sys

p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE, universal_newlines=True)

# Read both stdout and stderr simultaneously
sel = selectors.DefaultSelector()
sel.register(p.stdout, selectors.EVENT_READ)
sel.register(p.stderr, selectors.EVENT_READ)
ok = True
while ok:
    for key, val1 in sel.select():
        line = key.fileobj.readline()
        if not line:
            ok = False
            break
        if key.fileobj is p.stdout:
            print(f"STDOUT: {line}", end="")
        else:
            print(f"STDERR: {line}", end="", file=sys.stderr)
From https://docs.python.org/3/library/subprocess.html#using-the-subprocess-module:

If you wish to capture and combine both streams into one, use stdout=PIPE and stderr=STDOUT instead of capture_output.

So the easiest solution would be:
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

stdout_iterator = iter(process.stdout.readline, b"")

for line in stdout_iterator:
    # Do stuff with line
    print line
I know this question is very old, but this answer may help others who stumble upon this page in researching a solution for a similar situation, so I'm posting it anyway.
I've built a simple python snippet that will merge any number of pipes into a single one. Of course, as stated above, the order cannot be guaranteed, but this is as close as I think you can get in Python.
It spawns a thread for each of the pipes, reads them line by line and puts them into a Queue (which is FIFO). The main thread loops through the queue, yielding each line.
import threading, queue

def merge_pipes(**named_pipes):
    r'''
    Merges multiple pipes from subprocess.Popen (maybe other sources as well).
    The keyword argument keys will be used in the output to identify the source
    of the line.

    Example:
    p = subprocess.Popen(['some', 'call'],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    outputs = {'out': log.info, 'err': log.warn}
    for name, line in merge_pipes(out=p.stdout, err=p.stderr):
        outputs[name](line)

    This will output stdout to the info logger, and stderr to the warning logger
    '''

    # Constants. Could also be placed outside of the method. I just put them here
    # so the method is fully self-contained
    PIPE_OPENED = 1
    PIPE_OUTPUT = 2
    PIPE_CLOSED = 3

    # Create a queue where the pipes will be read into
    output = queue.Queue()

    # This method is the run body for the threads that are instantiated below.
    # This could be easily rewritten to be outside of the merge_pipes method,
    # but to make it fully self-contained I put it here.
    def pipe_reader(name, pipe):
        r"""
        reads a single pipe into the queue
        """
        output.put((PIPE_OPENED, name,))
        try:
            for line in iter(pipe.readline, ''):
                output.put((PIPE_OUTPUT, name, line.rstrip(),))
        finally:
            output.put((PIPE_CLOSED, name,))

    # Start a reader for each pipe
    for name, pipe in named_pipes.items():
        t = threading.Thread(target=pipe_reader, args=(name, pipe,))
        t.daemon = True
        t.start()

    # Use a counter to determine how many pipes are left open.
    # If all are closed, we can return
    pipe_count = 0

    # Read the queue in order, blocking if there's no data
    for data in iter(output.get, ''):
        code = data[0]
        if code == PIPE_OPENED:
            pipe_count += 1
        elif code == PIPE_CLOSED:
            pipe_count -= 1
        elif code == PIPE_OUTPUT:
            yield data[1:]
        if pipe_count == 0:
            return
This works for me (on Windows):
https://github.com/waszil/subpiper
from subpiper import subpiper

def my_stdout_callback(line: str):
    print(f'STDOUT: {line}')

def my_stderr_callback(line: str):
    print(f'STDERR: {line}')

my_additional_path_list = [r'c:\important_location']

retcode = subpiper(cmd='echo magic',
                   stdout_callback=my_stdout_callback,
                   stderr_callback=my_stderr_callback,
                   add_path_list=my_additional_path_list)
I need to run an interactive Bash instance in a separate process in Python with its own dedicated TTY (I can't use pexpect).
I used this code snippet I commonly see used in similar programs:
master, slave = pty.openpty()
p = subprocess.Popen(["/bin/bash", "-i"], stdin=slave, stdout=slave, stderr=slave)
os.close(slave)
x = os.read(master, 1026)
print x
subprocess.Popen.kill(p)
os.close(master)
But when I run it I get the following output:
$ ./pty_try.py
bash: cannot set terminal process group (10790): Inappropriate ioctl for device
bash: no job control in this shell
Strace of the run shows some errors:
...
readlink("/usr/bin/python2.7", 0x7ffc8db02510, 4096) = -1 EINVAL (Invalid argument)
...
ioctl(3, SNDCTL_TMR_TIMEBASE or SNDRV_TIMER_IOCTL_NEXT_DEVICE or TCGETS, 0x7ffc8db03590) = -1 ENOTTY (Inappropriate ioctl for device)
...
readlink("./pty_try.py", 0x7ffc8db00610, 4096) = -1 EINVAL (Invalid argument)
The code snippet seems pretty straightforward. Is Bash not getting something it needs? What could be the problem here?
This is a solution to run an interactive command in a subprocess. It uses a pseudo-terminal to make stdout non-blocking (some commands also need a tty device, e.g. bash). It uses select to handle input and output to the subprocess.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import select
import termios
import tty
import pty
from subprocess import Popen

command = 'bash'
# command = 'docker run -it --rm centos /bin/bash'.split()

# save original tty setting then set it to raw mode
old_tty = termios.tcgetattr(sys.stdin)
tty.setraw(sys.stdin.fileno())

# open pseudo-terminal to interact with subprocess
master_fd, slave_fd = pty.openpty()

try:
    # use os.setsid() make it run in a new process group, or bash job control will not be enabled
    p = Popen(command,
              preexec_fn=os.setsid,
              stdin=slave_fd,
              stdout=slave_fd,
              stderr=slave_fd,
              universal_newlines=True)

    while p.poll() is None:
        r, w, e = select.select([sys.stdin, master_fd], [], [])
        if sys.stdin in r:
            d = os.read(sys.stdin.fileno(), 10240)
            os.write(master_fd, d)
        elif master_fd in r:
            o = os.read(master_fd, 10240)
            if o:
                os.write(sys.stdout.fileno(), o)
finally:
    # restore tty settings back
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
This is the solution that worked for me in the end (as suggested by qarma):
libc = ctypes.CDLL('libc.so.6')
master, slave = pty.openpty()
p = subprocess.Popen(["/bin/bash", "-i"], preexec_fn=libc.setsid, stdin=slave, stdout=slave, stderr=slave)
os.close(slave)
... do stuff here ...
x = os.read(master, 1026)
print x
Here is a full object oriented solution to do interactive shell commands with TTYs using threads and queues for stdout and stderr IO handling. This took me a while to build from multiple locations but it works perfectly so far on Unix/Linux systems and also as part of a Juniper op script. Thought I would post this here to save others time in trying to build something like this.
import pty
import re
import select
import threading
from datetime import datetime, timedelta
import os
import logging
import subprocess
import time
from queue import Queue, Empty
lib_logger = logging.getLogger("lib")

DEVICE_TIMEOUT = 30  # assumed default wait timeout in seconds, used by open() and wait_for()
# Handler function to be run as a thread for pulling pty channels from an interactive shell
def _pty_handler(pty_master, logger, queue, stop):
    poller = select.poll()
    poller.register(pty_master, select.POLLIN)
    while True:
        # Stop handler if flagged
        if stop():
            logger.debug("Disabling pty handler for interactive shell")
            break
        fd_event = poller.poll(100)
        for descriptor, event in fd_event:
            # Read data from pipe and send to queue if there is data to read
            if event == select.POLLIN:
                data = os.read(descriptor, 1).decode("utf-8")
                if not data:
                    break
                # logger.debug("Reading in to handler queue: " + data)
                queue.put(data)
            # Exit handler if stdout is closing
            elif event == select.POLLHUP:
                logger.debug("Disabling pty handler for interactive shell")
                break
# Function for reading outputs from the given queue by draining it and returning the output
def _get_queue_output(queue: Queue) -> str:
    value = ""
    try:
        while True:
            value += queue.get_nowait()
    except Empty:
        return value

# Helper function to create the needed list for popen and print the command run to the logger
def popen_command(command, logger, *args):
    popen_list = list()
    popen_list.append(command)
    command_output = command
    for arg in args:
        popen_list.append(arg)
        command_output += " " + arg
    lib_logger.debug("Making Popen call using: " + str(popen_list))
    logger.debug("")
    logger.debug(command_output)
    logger.debug("")
    return popen_list
# Class for creating an interactive shell and sending commands to it along with logging output to loggers
class InteractiveShell(object):
    def __init__(self, command, logger, *args):
        self.logger = logger
        self.command = command
        self.process = None
        self.popen_list = popen_command(command, logger, *args)
        self.master_stdout = None
        self.slave_stdout = None
        self.master_stderr = None
        self.slave_stderr = None
        self.stdout_handler = None
        self.stderr_handler = None
        self.stdout_queue = None
        self.stderr_queue = None
        self.stop_handlers = False

    # Open interactive shell and setup all threaded IO handlers
    def open(self, shell_prompt, timeout=DEVICE_TIMEOUT):
        # Create PTYs
        self.master_stdout, self.slave_stdout = pty.openpty()
        self.master_stderr, self.slave_stderr = pty.openpty()

        # Create shell subprocess
        self.process = subprocess.Popen(self.popen_list, stdin=self.slave_stdout, stdout=self.slave_stdout,
                                        stderr=self.slave_stderr, bufsize=0, start_new_session=True)

        lib_logger.debug("")
        lib_logger.debug("Started interactive shell for command " + self.command)
        lib_logger.debug("")

        # Create thread and queues for handling pty output and start them
        self.stdout_queue = Queue()
        self.stderr_queue = Queue()
        self.stdout_handler = threading.Thread(target=_pty_handler, args=(self.master_stdout,
                                                                          lib_logger,
                                                                          self.stdout_queue,
                                                                          lambda: self.stop_handlers))
        self.stderr_handler = threading.Thread(target=_pty_handler, args=(self.master_stderr,
                                                                          lib_logger,
                                                                          self.stderr_queue,
                                                                          lambda: self.stop_handlers))
        self.stdout_handler.daemon = True
        self.stderr_handler.daemon = True

        lib_logger.debug("Enabling stderr handler for interactive shell " + self.command)
        self.stderr_handler.start()
        lib_logger.debug("Enabling stdout handler for interactive shell " + self.command)
        self.stdout_handler.start()

        # Wait for shell prompt
        lib_logger.debug("Waiting for shell prompt: " + shell_prompt)
        return self.wait_for(shell_prompt, timeout)
    # Close interactive shell which should also kill all threaded IO handlers
    def close(self):
        # Wait 5 seconds before closing to let shell handle all input and outputs
        time.sleep(5)

        # Stop IO handler threads and terminate the process then wait another 5 seconds for cleanup to happen
        self.stop_handlers = True
        self.process.terminate()
        time.sleep(5)

        # Check for any additional output from the stdout handler
        output = ""
        while True:
            data = _get_queue_output(self.stdout_queue)
            if data != "":
                output += data
            else:
                break
        for line in iter(output.splitlines()):
            self.logger.debug(line)

        # Check for any additional output from the stderr handler
        output = ""
        while True:
            data = _get_queue_output(self.stderr_queue)
            if data != "":
                output += data
            else:
                break
        for line in iter(output.splitlines()):
            self.logger.error(line)

        # Cleanup PTYs
        os.close(self.master_stdout)
        os.close(self.master_stderr)
        os.close(self.slave_stdout)
        os.close(self.slave_stderr)

        lib_logger.debug("Interactive shell command " + self.command + " terminated")
    # Run series of commands given as a list of a list of commands and wait_for strings. If no wait_for is needed then
    # only provide the command. Return if all the commands completed successfully or not.
    # Ex:
    # [
    #     ["ssh jsas#" + vnf_ip, r"jsas#.*:"],
    #     ["juniper123", r"jsas#.*\$"],
    #     ["sudo su", r".*jsas:"],
    #     ["juniper123", r"root#.*#"],
    #     ["usermod -p 'blah' jsas"]
    # ]
    def run_commands(self, commands_list):
        shell_status = True
        for command in commands_list:
            shell_status = self.run(command[0])
            if shell_status and len(command) == 2:
                shell_status = self.wait_for(command[1])
            # Break out of running commands if a command failed
            if not shell_status:
                break
        return shell_status

    # Run given command and return False if error occurs otherwise return True
    def run(self, command, sleep=0):
        # Check process to make sure it is still running and if not grab the stderr output
        if self.process.poll():
            self.logger.error("Interactive shell command " + self.command + " closed with return code: " +
                              str(self.process.returncode))
            data = _get_queue_output(self.stderr_queue)
            if data != "":
                self.logger.error("Interactive shell error messages:")
                for line in iter(data.splitlines()):
                    self.logger.error(line)
            return False

        # Write command to process and check to make sure a newline is in command otherwise add it
        if "\n" not in command:
            command += "\n"
        os.write(self.master_stdout, command.encode("utf-8"))
        if sleep:
            time.sleep(sleep)
        return True
    # Wait for a specific regex expression in the output before continuing. Return False if the wait time expires,
    # otherwise return True.
    def wait_for(self, this, timeout=DEVICE_TIMEOUT):
        timeout = datetime.now() + timedelta(seconds=timeout)
        output = ""

        # Keep searching for output until timeout occurs
        while timeout > datetime.now():
            data = _get_queue_output(self.stdout_queue)
            if data != "":
                # Add to output line and check for match to regex given and if match then break and send output to
                # logger
                output += data
                lib_logger.debug("Checking for " + this + " in data: ")
                for line in iter(output.splitlines()):
                    lib_logger.debug(line)
                if re.search(r"{}\s?$".format(this), output):
                    break
            time.sleep(1)

        # Send output to logger
        for line in iter(output.splitlines()):
            self.logger.debug(line)

        # If wait time expired print error message and return False
        if timeout < datetime.now():
            self.logger.error("Wait time expired when waiting for " + this)
            return False
        return True
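A hypothetical usage sketch (the command, prompt regex, timeout, and logger setup below are placeholders, not part of the original class):

import logging

logging.basicConfig(level=logging.DEBUG)
example_logger = logging.getLogger("example")

shell = InteractiveShell("/bin/bash", example_logger)   # placeholder command
if shell.open(r"\$", timeout=30):                       # wait for a "$" shell prompt (placeholder regex)
    shell.run_commands([
        ["echo hello", r"\$"],                          # send a command, then wait for the prompt again
    ])
shell.close()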
I've got a Python 3 script I use to back up and encrypt mysqldump files, and I'm having a particular issue with one database that is 67 GB after encryption & compression.
The mysqldump is outputting error code 3, so I'd like to catch the actual error message, as this could mean a couple of things.
The odd thing is that the backup file is the right size, so I'm not sure what the error means. It worked once on this database.
The code looks like the below, and I'd really appreciate some help on how to add non-blocking capture of stderr when the return code is anything but 0, for both p1 and p2.
Also, if I'm doing anything glaringly obvious wrong, please do let me know, as I'd like to make sure this is a reliable process. It has been working fine on my databases under 15 GB compressed.
import configparser
import datetime
import os
import queue
import subprocess
import syslog
import threading

def dbbackup():
    while True:
        item = q.get()
        #build up folder structure, daily, weekly, monthy & project
        genfile = config[item]['DBName'] + '-' + dateyymmdd + '-'
        genfile += config[item]['PubKey'] + '.sql.gpg'
        if os.path.isfile(genfile):
            syslog.syslog(item + ' ' + genfile + ' exists, removing')
            os.remove(genfile)
        syslog.syslog(item + ' will be backed up as ' + genfile)
        args = ['mysqldump', '-u', config[item]['UserNm'],
                '-p' + config[item]['Passwd'], '-P', config[item]['Portnu'],
                '-h', config[item]['Server']]
        args.extend(config[item]['MyParm'].split())
        args.append(config[item]['DBName'])
        p1 = subprocess.Popen(args, stdout=subprocess.PIPE)
        p2 = subprocess.Popen(['gpg', '-o', genfile, '-r',
                               config[item]['PubKey'], '-z', '9', '--encrypt'], stdin=p1.stdout)
        p2.wait()
        if p2.returncode == 0:
            syslog.syslog(item + ' encryption successful')
        else:
            syslog.syslog(syslog.LOG_CRIT, item + ' encryption failed ' + str(p2.returncode))
        p1.terminate()
        p1.wait()
        if p1.returncode == 0:
            pass  # does some uploads of the file etc.
        else:
            syslog.syslog(syslog.LOG_CRIT, item + ' extract failed ' + str(p1.returncode))
        q.task_done()

def main():
    db2backup = []
    for settingtest in config:
        db2backup.append(settingtest)
    if len(db2backup) >= 1:
        syslog.syslog('Backups started')
        for database in db2backup:
            q.put(database)
            syslog.syslog(database + ' added to backup queue')
        q.join()
        syslog.syslog('Backups finished')

q = queue.Queue()
config = configparser.ConfigParser()
config.read('backup.cfg')
backuptype = 'daily'
dateyymmdd = datetime.datetime.now().strftime('%Y%m%d')
for i in range(2):
    t = threading.Thread(target=dbbackup)
    t.daemon = True
    t.start()

if __name__ == '__main__':
    main()
Simplify your code:
avoid unnecessary globals, pass parameters to the corresponding functions instead
avoid reimplementing a thread pool (it hurts readability and it misses convenience features accumulated over the years).
The simplest way to capture stderr is to use stderr=PIPE and .communicate() (blocking call):
#!/usr/bin/env python3
from configparser import ConfigParser
from datetime import datetime
from multiprocessing.dummy import Pool
from subprocess import Popen, PIPE

def backup_db(item, conf):  # config[item] == conf
    """Run `mysqldump ... | gpg ...` command."""
    genfile = '{conf[DBName]}-{now:%Y%m%d}-{conf[PubKey]}.sql.gpg'.format(
        conf=conf, now=datetime.now())
    # ...
    args = ['mysqldump', '-u', conf['UserNm'], ...]
    with Popen(['gpg', ...], stdin=PIPE) as gpg, \
         Popen(args, stdout=gpg.stdin, stderr=PIPE) as db_dump:
        gpg.communicate()
        error = db_dump.communicate()[1]
    if gpg.returncode or db_dump.returncode:
        error

def main():
    config = ConfigParser()
    with open('backup.cfg') as file:  # raise exception if config is unavailable
        config.read_file(file)
    with Pool(2) as pool:
        pool.starmap(backup_db, config.items())

if __name__ == "__main__":
    main()
If there is a huge number of items in the config then you could use pool.imap() instead of pool.starmap() (the call has to be modified slightly, as in the sketch below).
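A minimal sketch of that adaptation (backup_db_item is a hypothetical wrapper, needed because imap passes each (item, conf) pair as a single argument):

def backup_db_item(item_conf):
    item, conf = item_conf
    return backup_db(item, conf)  # unpack the pair for backup_db

with Pool(2) as pool:
    for _ in pool.imap(backup_db_item, config.items()):
        pass  # consume the iterator so every backup actually runs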
For robustness, wrap the backup_db() function to catch and log all exceptions, as in the sketch below.
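A minimal sketch of such a wrapper (backup_db_safe is a hypothetical name; it reuses the syslog logging from the question):

import syslog
import traceback

def backup_db_safe(item, conf):
    try:
        backup_db(item, conf)
    except Exception:
        # log the full traceback but keep the pool processing the remaining databases
        syslog.syslog(syslog.LOG_CRIT, 'backup of %s failed: %s' % (item, traceback.format_exc()))

Pass backup_db_safe to pool.starmap() (or pool.imap()) in place of backup_db.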
I am starting a subprocess via Python and displaying the stdout (progress) in a progress bar:
def rv(args):
    p = subprocess.Popen(["linkto.exe"] + [x for x in args], stdout=subprocess.PIPE)
    while True:
        line = p.stdout.readline()
        if line != "":
            progressStr = re.search(r"([0-9]+.[0-9]+%)", line.rstrip())
            if progressStr == None:
                print line.rstrip()
            else:
                progressInt = int(float(re.sub("[^0123456789\.]", "", progressStr.group())))
                print progressInt
        else:
            break
As you see, progressInt is my cleaned up version of the stdout with integer values for the progress % - it works fine so far. However, depending on my input the stdout may vary because the subprocess may spawn another process after the primary one.
How could I drop all lines of my stdout after progressInt hits 100 for the first time?
I managed to find a solution via re.search. There was a small difference in the stdout of process1 (writes "Info:") and process2 (writes "Info [32]:").
def rv(args):
    p = subprocess.Popen(["C:/Program Files/Tweak/RV-4.2.3-64/bin/rvio_hw.exe"] + [x for x in args], stdout=subprocess.PIPE)
    for line in iter(p.stdout.readline, ""):
        noFFMpeg = re.search(r"INFO: (.*)", line.rstrip())
        if noFFMpeg is not None:
            progressStr = re.search(r"([0-9]+.[0-9]+%)", noFFMpeg.group())
            if progressStr is not None:
                progressInt = int(float(re.sub("[^0123456789\.]", "", progressStr.group())))
                self.prog_QProgressBar.setValue(progressInt)
                QtGui.QApplication.processEvents()
                print progressStr.group()