I have a process I need to run in Python and capture the output of. How do I run it so that the user can specify optional arguments to pass to the process? This is what my function looks like so far.
async def analyze_target(self, target, batchSize, delay, maxdepth, maxurls, maxwait, recursive, useragent, htmlmaxcols, htmlmaxrows):
    cmd = "wappalyzer"
    if batchSize != "":
        cmd = cmd + " --batch-size=" + batchSize
    if delay != "":
        cmd = cmd + " --delay=" + delay
    if maxdepth != "":
        cmd = cmd + " --max-depth=" + maxdepth
    if maxurls != "":
        cmd = cmd + " --max-urls=" + maxurls
    if maxwait != "":
        cmd = cmd + " --max-wait=" + maxwait
    if recursive == True:
        cmd = cmd + " --recursive"
    if useragent != "":
        cmd = cmd + " --user-agent=" + useragent
    if htmlmaxcols != "":
        cmd = cmd + " --html-max-cols=" + htmlmaxcols
    if htmlmaxrows != "":
        cmd = cmd + " --html-max-rows=" + htmlmaxrows
    cmd = cmd + " " + target
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    tmp = p.stdout.read()
    self.logger.info(tmp)
    p_status = p.wait()
    """
    Returns log of what was wappalyzed
    """
    message = f"target {target} has been wappalyzed with output {tmp}"
    # This logs to the docker logs
    self.logger.info(message)
    return tmp
The first argument of Popen, args, accepts either a single string or a sequence. So you could append optional arguments to a list, for example:
cmd = [cmd, *args]
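For instance, a minimal sketch of that list-building idea (the option values and URL here are placeholders, not from the original function):

import subprocess

optional_args = ["--delay=500", "--recursive"]  # hypothetical user-supplied options
cmd = ["wappalyzer", *optional_args, "https://example.com"]

# Popen receives the argv list directly; no shell parsing is involved.
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(p.communicate()[0])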
Or you could use **kwargs in your function's signature (which collects any extra named arguments into a dictionary) and do something like this:
def analyze_target(..., **kwargs):
    ...
    for key, value in kwargs.items():
        cmd += f" --{key.replace('_', '-')}={value}"  # underscores become dashes, since dashes are not valid in Python identifiers
    ...
# a call would look like this:
analyze_target(..., additional_argument='test')
# cmd: all other arguments + --additional-argument=test
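A more complete sketch of this **kwargs approach, combined with a list-based command (illustrative only; the option names mirror the question, and capture_output/text assume Python 3.7+):

import subprocess

def analyze_target(target, recursive=False, **kwargs):
    cmd = ["wappalyzer"]
    if recursive:
        cmd.append("--recursive")
    for key, value in kwargs.items():
        # Underscores become dashes: max_depth -> --max-depth
        cmd.append(f"--{key.replace('_', '-')}={value}")
    cmd.append(target)
    # capture_output and text require Python 3.7+
    return subprocess.run(cmd, capture_output=True, text=True).stdout

# analyze_target("https://example.com", max_depth="3")
# runs: wappalyzer --max-depth=3 https://example.com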
Do not build a string. Simply build a list containing the name of the command and its arguments as separate elements.
async def analyze_target(self, target, batchSize, delay, maxdepth, maxurls, maxwait, recursive, useragent, htmlmaxcols, htmlmaxrows):
    cmd = ["wappalyzer"]
    if batchSize != "":
        cmd.append("--batch-size=" + batchSize)
    if delay != "":
        cmd.append("--delay=" + delay)
    if maxdepth != "":
        cmd.append("--max-depth=" + maxdepth)
    if maxurls != "":
        cmd.append("--max-urls=" + maxurls)
    if maxwait != "":
        cmd.append("--max-wait=" + maxwait)
    if recursive:
        cmd.append("--recursive")
    if useragent != "":
        cmd.append("--user-agent=" + useragent)
    if htmlmaxcols != "":
        cmd.append("--html-max-cols=" + htmlmaxcols)
    if htmlmaxrows != "":
        cmd.append("--html-max-rows=" + htmlmaxrows)
    cmd.append(target)
    # Drop the shell=True, so that the command is executed
    # directly, without shell involvement
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
...
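As a side note, since analyze_target is declared async, the blocking Popen/read calls will stall the event loop. If that matters in your setup (an assumption on my part), a minimal non-blocking sketch using asyncio's subprocess support (Python 3.5+) looks like this:

import asyncio

async def run_and_capture(cmd):
    # cmd is the argv list built above, e.g. ["wappalyzer", "--recursive", target]
    proc = await asyncio.create_subprocess_exec(
        *cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT,
    )
    out, _ = await proc.communicate()  # waits without blocking the event loop
    return out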
Related
I'm using Sphinx to build my own model for voice recognition. I've followed the tutorial step by step and everything works fine until the point where I should run the Python script of sphinxtrain (whose role is to execute a set of Perl files through the terminal in the normal case). For me, the program only opens the files one by one in a chosen editor without executing them! (Watching other tutorial videos, the code below works normally.)
The code of the trainer:
#!/usr/bin/python
from __future__ import print_function
import getopt, sys, os

training_basedir = ""
sphinxbinpath = ""
sphinxpath = ""

def find_paths():
    global training_basedir
    global sphinxbinpath
    global sphinxpath
    # Find the location of the files, it can be libexec or lib or lib64
    currentpath = os.path.dirname(os.path.realpath(__file__))
    sphinxbinpath = os.path.realpath(currentpath + "/../libexec/sphinxtrain")
    if os.path.exists(currentpath + "/../lib/sphinxtrain/bw"):
        sphinxbinpath = os.path.realpath(currentpath + "/../lib/sphinxtrain/bw")
    if os.path.exists(currentpath + "/../bin/Release/Win32"):
        sphinxbinpath = os.path.realpath(currentpath + "/../bin/Release/Win32")
    # Find the location for the libraries
    sphinxpath = os.path.realpath(currentpath + "/../lib/sphinxtrain")
    if os.path.exists(currentpath + "/../lib64/sphinxtrain/scripts/00.verify"):
        sphinxpath = os.path.realpath(currentpath + "/../lib64/sphinxtrain")
    if os.path.exists(currentpath + "/../scripts/00.verify"):
        sphinxpath = os.path.realpath(currentpath + "/..")
    if not (os.path.exists(sphinxbinpath + "/bw") or os.path.exists(sphinxbinpath + "/bw.exe")):
        print("Failed to find sphinxtrain binaries. Check your installation")
        exit(1)
    # Perl scripts want forward slashes
    training_basedir = os.getcwd().replace('\\', '/')
    sphinxpath = sphinxpath.replace('\\', '/')
    sphinxbinpath = sphinxbinpath.replace('\\', '/')
    print("Sphinxtrain path:", sphinxpath)
    print("Sphinxtrain binaries path:", sphinxbinpath)

def setup(task):
    if not os.path.exists("etc"):
        os.mkdir("etc")
    print("Setting up the database " + task)
    out_cfg = open("./etc/sphinx_train.cfg", "w")
    for line in open(sphinxpath + "/etc/sphinx_train.cfg", "r"):
        line = line.replace("___DB_NAME___", task)
        line = line.replace("___BASE_DIR___", training_basedir)
        line = line.replace("___SPHINXTRAIN_DIR___", sphinxpath)
        line = line.replace("___SPHINXTRAIN_BIN_DIR___", sphinxbinpath)
        out_cfg.write(line)
    out_cfg.close()
    out_cfg = open("etc/feat.params", "w")
    for line in open(sphinxpath + "/etc/feat.params", "r"):
        out_cfg.write(line)
    out_cfg.close()

steps = [
    "000.comp_feat/slave_feat.pl",
    "00.verify/verify_all.pl",
    "0000.g2p_train/g2p_train.pl",
    "01.lda_train/slave_lda.pl",
    "02.mllt_train/slave_mllt.pl",
    "05.vector_quantize/slave.VQ.pl",
    "10.falign_ci_hmm/slave_convg.pl",
    "11.force_align/slave_align.pl",
    "12.vtln_align/slave_align.pl",
    "20.ci_hmm/slave_convg.pl",
    "30.cd_hmm_untied/slave_convg.pl",
    "40.buildtrees/slave.treebuilder.pl",
    "45.prunetree/slave.state-tying.pl",
    "50.cd_hmm_tied/slave_convg.pl",
    "60.lattice_generation/slave_genlat.pl",
    "61.lattice_pruning/slave_prune.pl",
    "62.lattice_conversion/slave_conv.pl",
    "65.mmie_train/slave_convg.pl",
    "90.deleted_interpolation/deleted_interpolation.pl",
    "decode/slave.pl",
]

def run_stages(stages):
    for stage in stages.split(","):
        for step in steps:
            name = step.split("/")[0].split(".")[-1]
            if name == stage:
                ret = os.system(sphinxpath + "/scripts/" + step)
                if ret != 0:
                    exit(ret)

def run_from(stage):
    found = False
    for step in steps:
        name = step.split("/")[0].split(".")[-1]
        if name == stage or found:
            found = True
            ret = os.system(sphinxpath + "/scripts/" + step)
            if ret != 0:
                exit(ret)

def run():
    print("Running the training")
    for step in steps:
        ret = os.system(sphinxpath + "/scripts/" + step)
        if ret != 0:
            exit(ret)

def usage():
    print("")
    print("Sphinxtrain processes the audio files and creates an acoustic model ")
    print("for CMUSphinx toolkit. The data needs to have a certain layout ")
    print("See the tutorial http://cmusphinx.sourceforge.net/wiki/tutorialam ")
    print("for details")
    print("")
    print("Usage: sphinxtrain [options] <command>")
    print("")
    print("Commands:")
    print("  -t <task> setup - copy configuration into database")
    print("  [-s <stage1,stage2,stage3>] [-f <stage>] run - run the training or just selected stages")

def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], "ht:s:f:", ["help", "task", "stages", "from"])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit(-1)
    task = None
    stages = None
    from_stage = None
    for o, a in opts:
        if o in ("-t", "--task"):
            task = a
        if o in ("-f", "--from"):
            from_stage = a
        if o in ("-s", "--stages"):
            stages = a
        if o in ("-h", "--help"):
            usage()
    if len(args) == 0:
        usage()
        sys.exit(-1)
    command = args[0]
    find_paths()
    if command == "setup":
        if task == None:
            print("No task name defined")
            sys.exit(-1)
        setup(task)
    elif command == "run":
        if stages != None:
            run_stages(stages)
        elif from_stage != None:
            run_from(from_stage)
        else:
            run()
    else:
        run()

if __name__ == "__main__":
    main()
Another way to solve this is to call the Perl interpreter explicitly in your os.system call, i.e.:
ret = os.system('perl ' + sphinxpath + "/scripts/" + step)
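A subprocess-based sketch of the same idea, in case you prefer it over os.system (this assumes perl is on PATH; sphinxpath and step are the variables from the script above):

import subprocess

# Invoke the Perl interpreter explicitly rather than relying on the
# .pl file association (which on Windows may point at an editor).
ret = subprocess.call(["perl", sphinxpath + "/scripts/" + step])
if ret != 0:
    exit(ret)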
Solved by associating Perl files (with the .pl extension) with perl.exe.
I'm trying to run a script inside a GCP Function but somehow it's not working.
Here is my function, which basically exports a file (a proxy) to Google Apigee:
def test2(request):
    cmd = "python ./my-proxy/tools/deploy.py -n myProxy -u userName:!password -o myOrg -e test -d ./my-proxy -p /"
    # does not block; it starts a subprocess
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # and you can block until the command finishes executing
    p.wait()
    # or: stdout, stderr = p.communicate()
    return "Proxy deployed to Apigee"
Here's what my deploy.py file looks like:
#!/usr/bin/env python

import base64
import getopt
import httplib
import json
import re
import os
import sys
import StringIO
import urlparse
import xml.dom.minidom
import zipfile

def httpCall(verb, uri, headers, body):
    if httpScheme == 'https':
        conn = httplib.HTTPSConnection(httpHost)
    else:
        conn = httplib.HTTPConnection(httpHost)
    if headers == None:
        hdrs = dict()
    else:
        hdrs = headers
    hdrs['Authorization'] = 'Basic %s' % base64.b64encode(UserPW)
    conn.request(verb, uri, body, hdrs)
    return conn.getresponse()

def getElementText(n):
    c = n.firstChild
    str = StringIO.StringIO()
    while c != None:
        if c.nodeType == xml.dom.Node.TEXT_NODE:
            str.write(c.data)
        c = c.nextSibling
    return str.getvalue().strip()

def getElementVal(n, name):
    c = n.firstChild
    while c != None:
        if c.nodeName == name:
            return getElementText(c)
        c = c.nextSibling
    return None

# Return TRUE if any component of the file path contains a directory name that
# starts with a "." like '.svn', but not '.' or '..'
def pathContainsDot(p):
    c = re.compile(r'\.\w+')
    for pc in p.split('/'):
        if c.match(pc) != None:
            return True
    return False

def getDeployments():
    # Print info on deployments
    hdrs = {'Accept': 'application/xml'}
    resp = httpCall('GET',
                    '/v1/organizations/%s/apis/%s/deployments' \
                    % (Organization, Name),
                    hdrs, None)
    if resp.status != 200:
        return None
    ret = list()
    deployments = xml.dom.minidom.parse(resp)
    environments = deployments.getElementsByTagName('Environment')
    for env in environments:
        envName = env.getAttribute('name')
        revisions = env.getElementsByTagName('Revision')
        for rev in revisions:
            revNum = int(rev.getAttribute('name'))
            error = None
            state = getElementVal(rev, 'State')
            basePaths = rev.getElementsByTagName('BasePath')
            if len(basePaths) > 0:
                basePath = getElementText(basePaths[0])
            else:
                basePath = 'unknown'
            # svrs = rev.getElementsByTagName('Server')
            status = {'environment': envName,
                      'revision': revNum,
                      'basePath': basePath,
                      'state': state}
            if error != None:
                status['error'] = error
            ret.append(status)
    return ret

def printDeployments(dep):
    for d in dep:
        print 'Environment: %s' % d['environment']
        print ' Revision: %i BasePath = %s' % (d['revision'], d['basePath'])
        print ' State: %s' % d['state']
        if 'error' in d:
            print ' Error: %s' % d['error']

ApigeeHost = 'https://api.enterprise.apigee.com'
UserPW = None
Directory = None
Organization = None
Environment = None
Name = None
BasePath = '/'
ShouldDeploy = True
ZipFile = None  # initialize so the check below does not raise NameError when -z is absent

Options = 'h:u:d:e:n:p:o:i:z:'

opts = getopt.getopt(sys.argv[1:], Options)[0]

for o in opts:
    if o[0] == '-n':
        Name = o[1]
    elif o[0] == '-o':
        Organization = o[1]
    elif o[0] == '-h':
        ApigeeHost = o[1]
    elif o[0] == '-d':
        Directory = o[1]
    elif o[0] == '-e':
        Environment = o[1]
    elif o[0] == '-p':
        BasePath = o[1]
    elif o[0] == '-u':
        UserPW = o[1]
    elif o[0] == '-i':
        ShouldDeploy = False
    elif o[0] == '-z':
        ZipFile = o[1]

if UserPW == None or \
        (Directory == None and ZipFile == None) or \
        Environment == None or \
        Name == None or \
        Organization == None:
    print """Usage: deploy -n [name] (-d [directory name] | -z [zipfile])
       -e [environment] -u [username:password] -o [organization]
       [-p [base path] -h [apigee API url] -i]
    base path defaults to "/"
    Apigee URL defaults to "https://api.enterprise.apigee.com"
    -i denotes to import only and not actually deploy
    """
    sys.exit(1)

url = urlparse.urlparse(ApigeeHost)
httpScheme = url[0]
httpHost = url[1]

body = None

if Directory != None:
    # Construct a ZIPped copy of the bundle in memory
    tf = StringIO.StringIO()
    zipout = zipfile.ZipFile(tf, 'w')
    dirList = os.walk(Directory)
    for dirEntry in dirList:
        if not pathContainsDot(dirEntry[0]):
            for fileEntry in dirEntry[2]:
                if not fileEntry.endswith('~'):
                    fn = os.path.join(dirEntry[0], fileEntry)
                    en = os.path.join(
                        os.path.relpath(dirEntry[0], Directory),
                        fileEntry)
                    print 'Writing %s to %s' % (fn, en)
                    zipout.write(fn, en)
    zipout.close()
    body = tf.getvalue()
elif ZipFile != None:
    f = open(ZipFile, 'r')
    body = f.read()
    f.close()

# Upload the bundle to the API
hdrs = {'Content-Type': 'application/octet-stream',
        'Accept': 'application/json'}
uri = '/v1/organizations/%s/apis?action=import&name=%s' % \
      (Organization, Name)
resp = httpCall('POST', uri, hdrs, body)

if resp.status != 200 and resp.status != 201:
    print 'Import failed to %s with status %i:\n%s' % \
          (uri, resp.status, resp.read())
    sys.exit(2)

deployment = json.load(resp)
revision = int(deployment['revision'])

print 'Imported new proxy version %i' % revision

if ShouldDeploy:
    # Undeploy duplicates
    deps = getDeployments()
    for d in deps:
        if d['environment'] == Environment and \
                d['basePath'] == BasePath and \
                d['revision'] != revision:
            print 'Undeploying revision %i in same environment and path:' % \
                  d['revision']
            conn = httplib.HTTPSConnection(httpHost)
            resp = httpCall('POST',
                            ('/v1/organizations/%s/apis/%s/deployments' +
                             '?action=undeploy' +
                             '&env=%s' +
                             '&revision=%i') % \
                            (Organization, Name, Environment, d['revision']),
                            None, None)
            if resp.status != 200 and resp.status != 204:
                print 'Error %i on undeployment:\n%s' % \
                      (resp.status, resp.read())

    # Deploy the bundle
    hdrs = {'Accept': 'application/json'}
    resp = httpCall('POST',
                    ('/v1/organizations/%s/apis/%s/deployments' +
                     '?action=deploy' +
                     '&env=%s' +
                     '&revision=%i' +
                     '&basepath=%s') % \
                    (Organization, Name, Environment, revision, BasePath),
                    hdrs, None)
    if resp.status != 200 and resp.status != 201:
        print 'Deploy failed with status %i:\n%s' % (resp.status, resp.read())
        sys.exit(2)

deps = getDeployments()
printDeployments(deps)
This works when I run it locally on my machine, but not on GCP. I don't know if it has anything to do with the fact that I'm connecting to Google Apigee from this function. It's weird that the logs on GCP don't show any error; however, my proxy is not exported to Apigee.
Thanks for the help!
UPDATED:
I tried using subprocess.check_output() as encouraged by some here:
def test(request):
    output = None
    try:
        output = subprocess.check_output([
            "./my-proxy/tools/deploy.py",
            '-n', 'myProxy',
            '-u', 'myUserName:myPassword',
            '-o', 'myOrgName',
            '-e', 'test',
            '-d', './my-proxy',
            '-p', '/'])
    except:
        print(output)
    return output
It's still not working on GCP. Like I mentioned before, both solutions above work like a charm on my machine, but not on GCP.
I get a 200 after executing deploy.py on GCP, but my file doesn't go to Apigee (shown in a screenshot, omitted here).
The GCP logs don't show any error either.
This is possible!
The python executable is not installed or linked in the Cloud Function runtime, but python3 is. Hence, there are a few ways to solve this:
1. specify python3 as the program to run: "python3 ./my-proxy/tools/deploy.py ...";
2. add the #! operator in the deploy.py script: #!/usr/bin/env python3;
3. specify the Python interpreter to Popen. You can use sys.executable to refer to the currently running interpreter:
process = subprocess.Popen(
    [
        sys.executable,
        "./deploy.py",
        "-n",
        "myProxy",
        "-u",
        "myUserName:myPassword",
        "-o",
        "myOrgName",
        "-e",
        "test",
        "-d",
        "./my-proxy",
        "-p",
        "/",
    ],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    universal_newlines=True,
)
You did not see an error because it was generated in the subprocess, printed to its stderr, and subsequently captured by your program with process.communicate() or subprocess.check_output(...), but never printed. To see the error you are experiencing, print out the contents of stdout and stderr:
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)

out, err = process.communicate()
log.debug("returncode = %s", process.returncode)
log.debug("stdout = %s", out)
log.debug("stderr = %s", err)
Check out the source code we used to analyze, reproduce, and solve your question on GitHub.
I have been trying to implement a wrapper around subprocess as follows:
def ans_cmd_stream_color(inputcmd):
    """Driver function for local ansible commands.
    Stream stdout to stdout and log file with color.
    Runs <inputcmd> via subprocess.
    Returns return code, stdout, stderr as dict.
    """
    fullcmd = inputcmd
    create_debug('Enabling colorful ansible output.', LOGGER)
    create_info('Running command: ' + fullcmd, LOGGER, True)
    p = subprocess.Popen('export ANSIBLE_FORCE_COLOR=true; ' + fullcmd,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         shell=True)
    stdout_l = []
    stderr_l = []
    rcode = 0
    # Regex black magic
    ansi_escape = re.compile(r'\x1b[^m]*m')
    # Get the unbuffered IO action going.
    try:
        # Non blocking
        reads = [p.stdout.fileno(), p.stderr.fileno()]
        ret = select.select(reads, [], [])
        # Print line by line
        while True:
            for fd in ret[0]:
                if fd == p.stdout.fileno():
                    line = p.stdout.readline()
                    sys.stdout.write(line.encode('utf-8'))
                    stdout_l.append(ansi_escape.sub('',
                                    line.encode('utf-8'))
                                    )
                if fd == p.stderr.fileno():
                    line = p.stdout.readline()
                    sys.stderr.write(line.encode('utf-8'))
                    stderr_l.append(ansi_escape.sub('',
                                    line.encode('utf-8'))
                                    )
            # Break when the process is done.
            if p.poll() is not None:
                rcode = p.returncode
                break
    except BaseException as e:
        raise e
    outstr = ''.join(stdout_l)
    errstr = ''.join(stderr_l)
    outstr, errstr = str(outstr).rstrip('\n'), str(errstr).rstrip('\n')
    expstr = errstr.strip('ERROR: ')
    if len(expstr) >= 1:
        create_info('Command: ' + str(fullcmd) + ': ' + expstr + '\n', LOGGER,
                    True)
        if rcode == 0:
            rcode = 1
    else:
        create_info(outstr + '\n', LOGGER)
        if rcode == 0:
            create_info('Command: ' + fullcmd + ' ran successfully.', LOGGER,
                        True)
            expstr = False
    ret_dict = {inputcmd: {}}
    ret_dict[inputcmd]['rcode'] = rcode
    ret_dict[inputcmd]['stdout'] = outstr
    ret_dict[inputcmd]['stderr'] = expstr
    return copy.deepcopy(ret_dict)
The idea is to print the streaming output of the subprocess command and then return the info to the caller. The issue is that even when using a direct io.open, the subprocess PIPE is still buffered unless I set:
os.environ["PYTHONUNBUFFERED"] = "1"
which is not ideal. Any ideas, or has anybody else encountered this issue?
UPDATE: With Ansible, you need to disable Python's output buffering in the subprocess environment for it to honor the buffering settings:
def ans_cmd_stream_color(inputcmd):
    """Driver function for local ansible commands.
    Stream stdout to stdout and log file with color.
    Runs <inputcmd> via subprocess.
    Returns return code, stdout, stderr as dict.
    """
    fullcmd = inputcmd
    create_debug('Enabling colorful ansible output.', LOGGER)
    create_info('Running command: ' + fullcmd, LOGGER, True)
    p = subprocess.Popen('export ANSIBLE_FORCE_COLOR=true; ' +
                         'export PYTHONUNBUFFERED=1; ' + fullcmd,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         shell=True)
    # ... the rest of the function is unchanged from the version above.
You should probably read directly from the subprocess pipes. Something like the following will send the standard output to the info logger and the standard error to the error logger:
import logging, subprocess

logging.basicConfig(level=logging.INFO)

# cmd is your command, as a list of arguments
proc = subprocess.Popen(
    cmd, stdout=subprocess.PIPE,
    stderr=subprocess.PIPE
)
cont = True
while cont:
    cont = False
    line = proc.stdout.readline()
    if not line == b"":
        out = line.decode("utf-8").rstrip()
        logging.info(out)
        cont = True
    line = proc.stderr.readline()
    if not line == b"":
        out = line.decode("utf-8").rstrip()
        logging.error(out)
        cont = True
    if not cont and proc.poll() is not None:
        break
To address the buffering issue, per this question, either the subordinate Python script must explicitly flush the buffers, or the environment variable PYTHONUNBUFFERED must be set to a non-empty string.
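For instance, a minimal sketch of setting that variable for the child process only, via Popen's env argument (the script name here is a placeholder):

import os
import subprocess

# Any non-empty string disables the child Python's output buffering.
env = dict(os.environ, PYTHONUNBUFFERED="1")
proc = subprocess.Popen(
    ["python3", "child_script.py"],  # hypothetical child script
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    env=env,
)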
I have two problems with the code below.
Problem 1 is that pty does not work on Windows. It works on macOS, but I need this application to run on all platforms.
Problem 2 is that when I run this I get AttributeError: 'list' object has no attribute 'rstrip'. I am not sure how to fix either problem.
command1 = transporterLink + " -m verify -f " + indir1 + " -u " + username + " -p " + password + " -o " + logPath + " -s " + provider1 + " -v eXtreme"
master, slave = pty.openpty()
process = Popen(command1, shell=True, stdin=PIPE, stdout=slave, stderr=slave, close_fds=True)
stdout = os.fdopen(master)
import select
q = select.poll()
q.register(stdout, select.POLLIN)
global subject
subject = "Test"
while True:
    wx.Yield()
    line = q.poll()
    if not line:
        continue  # no input
    else:
        line = line.rstrip()
        print line
        if "Returning 1" in line:
            result1 = "Verify FAILED!"
            subject = "FAILED! - "
            self.sendEmail(self)
            break
        if "Returning 0" in line:
            result1 = "Verify PASSED!"
            subject = "PASSED! - "
            self.sendEmail(self)
            break
q.poll() returns a list, and lists don't have an rstrip method. It returns (fd, event) pairs for the file descriptors you registered; you still have to read the actual text from the file object yourself.
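A minimal sketch of that fix against the code above (assuming one readline per poll event is acceptable here):

events = q.poll(100)  # list of (fd, event) tuples, or [] on timeout
if events:
    line = stdout.readline().rstrip()  # read text from the file object, then strip it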
I have the following code:
pwd = '/home/user/svnexport/Repo/'
updateSVN = "svn up " + pwd
cmd = os.popen(updateSVN)
getAllInfo = "svn info " + pwd + "branches/* " + pwd + "tags/* " + pwd + "trunk/*"
cmd = os.popen(getAllInfo)
How can I be sure cmd = os.popen(updateSVN) has completed execution before cmd = os.popen(getAllInfo) begins execution?
You should use subprocess:
import subprocess
import glob
pwd = '/home/user/svnexport/Repo/'
updateSVN = ["svn", "up", pwd]
cmd = subprocess.Popen(updateSVN)
status = cmd.wait()
# the same can be achieved in a shorter way:
filelists = [glob.glob(pwd + i + "/*") for i in ('branches', 'tags', 'trunk')]
filelist = sum(filelists, []) # add them together
getAllInfo = ["svn", "info"] + filelist
status = subprocess.call(getAllInfo)
If you need to capture the subprocess's output, instead do
process = subprocess.Popen(..., stdout=subprocess.PIPE)
data = process.stdout.read()
status = process.wait()
If you need to wait for the first command to terminate, you don't really need multithreading. You can just do
os.system(updateSVN)
os.system(getAllInfo)
If you really want to keep os.popen, you can wait for the first command by consuming its output:
for _ in cmd:
    pass
Note that os.popen returns a file-like object, not a Popen instance, so it has no wait() method; reading it to the end has the same effect, blocking until the command finishes:
pwd = '/home/user/svnexport/Repo/'
updateSVN = "svn up " + pwd
cmd = os.popen(updateSVN)
cmd.read()  # blocks until "svn up" has completed
getAllInfo = "svn info " + pwd + "branches/* " + pwd + "tags/* " + pwd + "trunk/*"
cmd = os.popen(getAllInfo)
If you want to wait, the simplest way is to use one of the following subprocess functions:
call
check_call
check_output
Each of those returns only after the command has finished executing in the shell; see the docs for details.
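For example, a minimal sketch for the svn case from the question (check_call raises CalledProcessError on a non-zero exit, so the second command only runs if the update succeeded):

import subprocess

pwd = '/home/user/svnexport/Repo/'
subprocess.check_call(["svn", "up", pwd])  # blocks until "svn up" finishes
info = subprocess.check_output(["svn", "info", pwd + "trunk"])  # also blocks; returns bytes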