mkdir and touch does not work in the same subprocess - python

import subprocess
def execCommand(cmd):
    """Run *cmd* (an argv list) and return (returncode, stdout, stderr).

    stdout/stderr are returned as bytes. On failure to spawn the process
    at all, returns (-1, '', '').
    """
    try:
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        out, err = proc.communicate()
        return proc.returncode, out, err
    except OSError as exc:  # e.g. executable not found; never swallow silently
        print("failed: %s" % exc)
        return -1, '', ''

if __name__ == '__main__':
    # "&&" is shell syntax. Without shell=True it is handed to mkdir as a
    # literal argument (mkdir then creates a directory named "&&", "touch",
    # and "/tmp/newdir/myfile.txt"). Run the two commands separately instead.
    res, out, error = execCommand(["mkdir", "-p", "/tmp/newdir/"])
    if res == 0:
        res, out, error = execCommand(["touch", "/tmp/newdir/myfile.txt"])
The touch command above generates myfile.txt as a directory instead of file, could someone explain why?

Don't use subprocess at all for this. The external commands you are running wrap the same library calls that Python can call itself.
import os

# exist_ok=True makes this idempotent -- the original raised
# FileExistsError when run a second time.
os.makedirs("/tmp/newdir", exist_ok=True)
# Opening with mode "w" creates (or truncates) a regular file --
# the Python equivalent of "touch".
with open("/tmp/newdir/myfile.txt", "w"):
    pass
If you really want a temporary file, consider using the tempfile module instead to create the file securely.

Related

Getting permission denied error when executing a command via subprocess.Popen but not in shell

I am trying to move a folder into another folder but am getting Permission Denied error when I try to perform this operation in a Python script vs. the move working successfully when I run it in bash or even in Python interactive mode.
# argv form: each element is exactly one argument; no shell is involved,
# so no quoting issues -- but also no shell features.
cmd = ['sudo', 'mv', '/path1/dir', '/path2']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
    print(stderr)
I also tried adding shell=True.
# shell=True variant: the argv list is joined back into a single string
# and parsed by /bin/sh (quoting is the caller's problem here).
p = subprocess.Popen(' '.join(cmd), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
    print(stderr)
In both cases, I am getting the following error:
"mv: cannot move '/path1/dir' to '/path2/dir': Permission denied\n"
I invoke my script in the following manner:
sudo python script.py
I tried executing each command in shell as well as Python interactive mode and I don't get any errors. Any idea what is going on over here?
After wasting hours of time debugging as to what was going wrong, I finally figured out what was happening. I was creating /path1 and /path2 using tempfile. Here is a snippet of the code:
class UtilitiesTest(unittest.TestCase):
    # Questioner's original (buggy) code, kept for illustration.
    # NOTE(review): "#staticmethod" is only a comment -- presumably the
    # "@" of "@staticmethod" was lost when the code was pasted.
    #staticmethod
    def createTestFiles():
        # mkstemp() returns (open_fd, path); the fd is discarded here and
        # therefore left open -- this is the bug diagnosed in the answer.
        dir = tempfile.mkdtemp()
        _, file = tempfile.mkstemp(dir=dir)
        return dir, file

    def test_MoveFileToAnotherLocation(self):
        src_dir, src_file = UtilitiesTest.createTestFiles()
        dest_dir, dest_file = UtilitiesTest.createTestFiles()
        # sudo is redundant when the whole script already runs under sudo.
        cmd = ['sudo', 'mv', src_dir, dest_dir]
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            print(stderr)
Like zwer said in the comments, if I am running this script using sudo, I don't need to add sudo in my mv command. Because I kept getting permission denied errors, I kept thinking that sudo would fix my problem. The actual issue over here was when tempfile.mkstemp() is called, it returns an open file descriptor along with the file path. I didn't pay much attention to the first argument, so when I modified my createTestFiles() to below, everything started working.
#staticmethod  -- NOTE(review): the "@" was likely lost in pasting
def createTestFiles():
    # Corrected version: close the descriptor that mkstemp() returns,
    # otherwise the temp file stays open in this process.
    dir = tempfile.mkdtemp()
    fd, file = tempfile.mkstemp(dir=dir)
    os.close(fd)
    return dir, file

Redirecting shell command output to a file does not work using subprocess.Popen in Python

I am using Python 2.6.6 and failed to re-direct the Beeline(Hive) SQL query output returning multiple rows to a file on Unix using ">". For simplicity's sake, I replaced the SQL query with simple "ls" command on current directory and outputting to a text file.
Please ignore syntax of function sendfile. I want help to tweak the function "callcmd" to pipe the stdout onto the text file.
def callcmd(cmd, shl):
    """Run *cmd* (argv list, or a string when shl=True), log its merged
    stdout/stderr and elapsed time, and return the exit code."""
    logging.info('> ' + ' '.join(map(str, cmd)))
    start_time = time.time()
    # stderr=STDOUT merges both streams so one read captures everything.
    command_process = subprocess.Popen(
        cmd,
        shell=shl,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True)
    command_output = command_process.communicate()[0]
    logging.info(command_output)
    elapsed_time = time.time() - start_time
    logging.info(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)) + ' = time to complete (hh:mm:ss)')
    if command_process.returncode != 0:
        logging.error('ERROR ON COMMAND: ' + ' '.join(map(str, cmd)))
        # BUG FIX: the original logged the global name ret_code, which is
        # undefined on the first call; use the process's own return code.
        logging.error('ERROR CODE: ' + str(command_process.returncode))
    return command_process.returncode
# ">" is shell syntax; passed as a plain argv element, ls just sees an
# argument literally named " >".  Hand the whole pipeline to a shell:
cmd = ['sh', '-c', 'ls > /home/input/xyz.txt']
ret_code = callcmd(cmd, False)
Your command (i.e. cmd) could be ['sh', '-c', 'ls > ~/xyz.txt']. That would mean that the output of ls is never passed to Python, it happens entirely in the spawned shell – so you can't log the output. In that case, I'd have used return_code = subprocess.call(cmd), no need for Popen and communicate.
Equivalently, assuming you use bash or similar, you can simply use
subprocess.call('ls > ~/test.txt', shell=True)
If you want to access the output, e.g. for logging, you could use
s = subprocess.check_output(['ls'])
and then write that to a file like you would regularly in Python. To check for a non-zero exit code, handle the CalledProcessError that is raised in such cases.
Here the stdout in command_output is written to a file. You don't need to use any redirection although an alternative might be to have the python print to stdout, and then you would redirect that in your shell to a file.
#!/usr/bin/python
import logging
import subprocess

cmd = ['ls']
# shell expects a boolean; the original passed shell='/bin/bash', which
# merely acts as "truthy" (use executable= to pick the shell binary).
command_process = subprocess.Popen(
    cmd,
    shell=False,
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,  # merge stderr into the captured stdout
    universal_newlines=True
)
command_output = command_process.communicate()[0]
if command_process.returncode != 0:
    logging.error('ERROR ON COMMAND: ' + ' '.join(map(str, cmd)))
    # BUG FIX: the original referenced the undefined name ret_code here.
    logging.error('ERROR CODE: ' + str(command_process.returncode))
# Context manager guarantees the file is flushed and closed.
with open('listing.txt', 'w') as f:
    f.write(command_output)
I added this piece of code to my code and it works fine. Thanks to @Snohdo.
# Context manager flushes and closes the file even if write() raises.
with open('listing.txt', 'w') as f:
    f.write(command_output)

Live stdout output from Python subprocess in Jupyter notebook

I'm using subprocess to run a command line program from a Python (3.5.2) script, which I am running in a Jupyter notebook. The subprocess takes a long time to run and so I would like its stdout to be printed live to the screen in the Jupyter notebook.
I can do this no problem in a normal Python script run from the terminal. I do this using:
def run_command(cmd):
    """Run *cmd* (a shell-style string), streaming its stdout live;
    return the child's exit code."""
    from subprocess import Popen, PIPE
    import shlex
    with Popen(shlex.split(cmd), stdout=PIPE, bufsize=1,
               universal_newlines=True) as p:
        for line in p.stdout:
            print(line, end='')  # each line already ends with '\n'
        # BUG FIX: poll() can still return None right after stdout hits
        # EOF; wait() blocks until the child has actually exited.
        exit_code = p.wait()
    return exit_code
However, when I run the script in a Jupyter notebook, it does not print the stdout live to the screen. Instead, it prints everything after the subprocess has finished running.
Does anyone have any ideas on how to remedy this?
Many thanks,
Johnny
The IPython notebook has its own support for running shell commands. If you don't need to capture anything with subprocess, you can just do
cmd = 'ls -l'
!{cmd}
Output from commands executed with ! is automatically piped through the notebook.
If you set stdout = None (this is the default, so you can omit the stdout argument altogether), then your process should write its output to the terminal running your IPython notebook server.
This happens because the default behavior is for subprocess to inherit from the parent file handlers (see docs).
Your code would look like this:
from subprocess import Popen, PIPE
import shlex

def run_command(cmd):
    """Start *cmd* with inherited stdout/stderr and wait for it.

    Returns the exit code.  BUG FIX: the original returned p.poll(),
    which is almost always None because the child had not finished yet;
    wait() blocks until it exits and returns the real code.
    """
    p = Popen(shlex.split(cmd), bufsize=1, universal_newlines=True)
    return p.wait()
This won't print to the notebook in browser, but at least you will be able to see the output from your subprocess asynchronously while other code is running.
Hope this helps.
Jupyter mucks with stdout and stderr. This should get what you want, and give you a more useful exception when the command fails to boot.
import signal
import subprocess as sp
class VerboseCalledProcessError(sp.CalledProcessError):
    """CalledProcessError whose str() also shows the captured
    stdout and stderr of the failed command."""

    def __str__(self):
        rc = self.returncode
        if rc and rc < 0:
            # A negative return code means the child was killed by a signal.
            try:
                msg = "Command '%s' died with %s." % (
                    self.cmd, repr(signal.Signals(-rc)))
            except ValueError:
                # Signal number not known to the signal module.
                msg = "Command '%s' died with unknown signal %d." % (
                    self.cmd, -rc)
        else:
            msg = "Command '%s' returned non-zero exit status %d." % (
                self.cmd, rc)
        return '%s\nStdout:\n%s\nStderr:\n%s' % (msg, self.output, self.stderr)
def bash(cmd, print_stdout=True, print_stderr=True):
    """Run *cmd* through /bin/bash, capturing (and optionally echoing)
    its stdout and stderr.  Raises VerboseCalledProcessError on a
    non-zero exit status; returns None on success."""
    import sys  # BUG FIX: sys was referenced but never imported
    proc = sp.Popen(cmd, stderr=sp.PIPE, stdout=sp.PIPE, shell=True,
                    universal_newlines=True, executable='/bin/bash')
    all_stdout = []
    all_stderr = []
    # Drain both pipes until the child exits.  NOTE(review): stdout is
    # read to EOF before stderr, so a child that fills the stderr pipe
    # first could deadlock -- TODO confirm acceptable for the use case.
    while proc.poll() is None:
        for stdout_line in proc.stdout:
            if stdout_line != '':
                if print_stdout:
                    print(stdout_line, end='')
                all_stdout.append(stdout_line)
        for stderr_line in proc.stderr:
            if stderr_line != '':
                if print_stderr:
                    print(stderr_line, end='', file=sys.stderr)
                all_stderr.append(stderr_line)
    stdout_text = ''.join(all_stdout)
    stderr_text = ''.join(all_stderr)
    if proc.wait() != 0:
        raise VerboseCalledProcessError(proc.returncode, cmd, stdout_text, stderr_text)
Replacing the for loop with the explicit readline() call worked for me.
from subprocess import Popen, PIPE
import shlex

def run_command(cmd):
    """Stream *cmd*'s stdout line by line; return the exit code."""
    with Popen(shlex.split(cmd), stdout=PIPE, bufsize=1,
               universal_newlines=True) as p:
        while True:
            line = p.stdout.readline()
            if not line:  # '' only at EOF; blank output lines are '\n'
                break
            # BUG FIX: end='' -- line keeps its own '\n', so the original
            # print(line) doubled every newline.
            print(line, end='')
        # BUG FIX: poll() may still be None at stdout EOF; wait() blocks
        # until the child really exits.
        exit_code = p.wait()
    return exit_code
Something is still broken about their iterators, even 4 years later.

Calling external program through Python and passing the output to stdout

How can I call an external program which is written in bash script in such a way that the output produced by that script is available in sys.stdout so that I can log the output in a file through python.
For example. I now call them through the following snippet
# Pick the platform-specific wrapper script (Windows vs. POSIX).
# NOTE(review): module_dir_run, splitargs and call come from elsewhere in
# the questioner's program -- not visible here.
if os.name == 'nt':
    path = module_dir_run+'/run.bat'
else:
    path = module_dir_run+'/run.sh'
if os.path.isfile(path):
    if (splitargs.arg):
        try:
            call([path, splitargs.arg])
        except:  # NOTE(review): bare except silently hides all failures
            pass
    else:
        try:
            call([path])
        except:  # NOTE(review): same silent swallow as above
            pass
else:
    print "Not found : " + path
when I store the value of sys.stdout = file(filename, "w") it stores whatever which the python outputs, but not what the script outputs.
NOTE: The script which i am trying to run is an interactive script, so after the call has ended, and the control has come back to python, how can i get all what is written in the terminal?
Any suggestions?
I always use subprocess.Popen() to run another program from inside a Python script. Example:
import subprocess

print("Starting process...")  # Python 3 print (original was Python 2)
# shell=False runs the argv list directly -- no shell parsing involved.
process = subprocess.Popen(["echo", "a"], shell=False)
process.wait()
print("Done")
You can redirect the output of process to another file like this:
import subprocess

print("Starting process...")  # Python 3 print (original was Python 2)
# Any writable file object can serve as the child's stdout.
with open("./out.log", "w") as f:
    process = subprocess.Popen(["echo", "a"], shell=False, stdout=f)
    process.wait()
print("Done")
Redirecting stderr is also possible through 'stderr' parameter.
When you have the sys.stdout in current script redirected to write to your own file, you can do this to redirect it in your subprocess too:
import subprocess
import sys

# Redirect this script's own stdout to a log file...
sys.stdout = open("./out.log", "w")
print "Starting process..."
# ...flush before spawning so our buffered text lands before the child's...
sys.stdout.flush()
# ...and hand the same file object to the child as its stdout.
process = subprocess.Popen(["echo", "a"], shell=False, stdout=sys.stdout)
process.wait()
print "Done"
You can write the outputs of such a call into a variable using e.g.:
# communicate() waits for exit and returns (stdout, stderr); [0] keeps
# only stdout (stderr was not PIPEd, so it goes to the terminal).
variable = subprocess.Popen(
    ['ls', 'mydir/'],  # argv list -- each argument is its own element
    stdout=subprocess.PIPE).communicate()[0]
As Martijn Pieters already states, you'll have to provide an alternative stdout for your call() call.
There are essentially 2 ways:
* either provide an already opened file (as you have changed your sys.stdout, this will probably be fine)
* use stdout=subprocess.PIPE and read and treat the stdout yourself. (Not via subprocess.call() then, but via the object returned by subprocess.Popen().)
Note that changing sys.stdout won't really change your process's stdout from the view of the OS, but instead will order your program to output everything on a non-usual file descriptor.
This might be of use. Particularly because most of the other answers don't understand that you need to redirect your parent shell's stdin to the child process's stdin because the programs/scripts you're calling are "interactive".
Python and subprocess input piping

Store output of subprocess.Popen call in a string [duplicate]

This question already has answers here:
Running shell command and capturing the output
(21 answers)
Closed 1 year ago.
I'm trying to make a system call in Python and store the output to a string that I can manipulate in the Python program.
#!/usr/bin/python
import subprocess
# NOTE(review): this is the questioner's failing code -- without
# shell=True the whole string "ntpq -p" is looked up as one executable
# name, and stdout is not captured anywhere.
p2 = subprocess.Popen("ntpq -p")
I've tried a few things including some of the suggestions here:
Retrieving the output of subprocess.call()
but without any luck.
In Python 2.7 or Python 3
Instead of making a Popen object directly, you can use the subprocess.check_output() function to store output of a command in a string:
# check_output runs the command and returns its stdout as bytes;
# it raises CalledProcessError on a non-zero exit status.
from subprocess import check_output
out = check_output(["ntpq", "-p"])
In Python 2.4-2.6
Use the communicate method.
import subprocess
# communicate() waits for the process and returns (stdout, stderr);
# err is None here because only stdout was PIPEd.
p = subprocess.Popen(["ntpq", "-p"], stdout=subprocess.PIPE)
out, err = p.communicate()
out is what you want.
Important note about the other answers
Note how I passed in the command. The "ntpq -p" example brings up another matter. Since Popen does not invoke the shell, you would use a list of the command and options—["ntpq", "-p"].
This worked for me for redirecting stdout (stderr can be handled similarly):
from subprocess import Popen, PIPE
# NOTE(review): "path" comes from the surrounding question -- presumably
# the program to run; verify against the caller.
pipe = Popen(path, stdout=PIPE)
text = pipe.communicate()[0]
If it doesn't work for you, please specify exactly the problem you're having.
Python 2: http://docs.python.org/2/library/subprocess.html#subprocess.Popen
# Python 2 example: with shell=True the command is a single string
# parsed by /bin/sh.
from subprocess import PIPE, Popen

command = "ntpq -p"
process = Popen(command, stdout=PIPE, stderr=None, shell=True)
output = process.communicate()[0]
print output
In the Popen constructor, if shell is True, you should pass the command as a string rather than as a sequence. Otherwise, just split the command into a list:
# Without shell=True the command must be an argv list instead:
command = ["ntpq", "-p"]
process = Popen(command, stdout=PIPE, stderr=None)
If you need to read also the standard error, into the Popen initialization, you should set stderr to PIPE or STDOUT:
# To capture standard error too, set stderr to PIPE (or STDOUT to merge):
command = "ntpq -p"
process = subprocess.Popen(command, stdout=PIPE, stderr=PIPE, shell=True)
output, error = process.communicate()
NOTE: Starting from Python 2.7, you could/should take advantage of subprocess.check_output (https://docs.python.org/2/library/subprocess.html#subprocess.check_output).
Python 3: https://docs.python.org/3/library/subprocess.html#subprocess.Popen
# Python 3: Popen is a context manager, and the captured bytes are
# decoded explicitly.
from subprocess import PIPE, Popen

command = "ntpq -p"
with Popen(command, stdout=PIPE, stderr=None, shell=True) as process:
    output = process.communicate()[0].decode("utf-8")
print(output)
NOTE: If you're targeting only versions of Python higher or equal than 3.5, then you could/should take advantage of subprocess.run (https://docs.python.org/3/library/subprocess.html#subprocess.run).
In Python 3.7+ you can use the new capture_output= keyword argument for subprocess.run:
import subprocess

# capture_output=True (3.7+) wires up stdout/stderr PIPEs; text=True
# decodes the captured bytes into str.
p = subprocess.run(["echo", "hello world!"],
                   capture_output=True, text=True)
assert p.stdout == 'hello world!\n'
Assuming that pwd is just an example, this is how you can do it:
import subprocess

# "pwd" is a single executable name, so a plain string is acceptable here.
p = subprocess.Popen("pwd", stdout=subprocess.PIPE)
result = p.communicate()[0]
print(result)  # bytes, e.g. b'/home/user\n' (Python 3 print call)
See the subprocess documentation for another example and more information.
for Python 2.7+ the idiomatic answer is to use subprocess.check_output()
You should also note the handling of arguments when invoking a subprocess, as it can be a little confusing....
If args is just single command with no args of its own (or you have shell=True set), it can be a string. Otherwise it must be a list.
for example... to invoke the ls command, this is fine:
# A single command with no arguments may be passed as a plain string:
from subprocess import check_call
check_call('ls')
so is this:
# ...or equivalently as a one-element list:
from subprocess import check_call
check_call(['ls',])
however, if you want to pass some args to the shell command, you can't do this:
# Deliberate counter-example: this FAILS -- the whole string "ls -al"
# is treated as one executable name, not a command plus argument.
from subprocess import check_call
check_call('ls -al')
instead, you must pass it as a list:
# A command with arguments must be passed as a list:
from subprocess import check_call
check_call(['ls', '-al'])
the shlex.split() function can sometimes be useful to split a string into shell-like syntax before creating a subprocesses...
like this:
# shlex.split() turns a shell-style string into such an argv list:
from subprocess import check_call
import shlex
check_call(shlex.split('ls -al'))
This works perfectly for me:
import subprocess

try:
    # stderr=STDOUT merges both streams into the returned bytes.
    result = subprocess.check_output("echo %USERNAME%", stderr=subprocess.STDOUT, shell=True)
    print(result)
    # This command fails, so check_output raises CalledProcessError.
    result = subprocess.check_output("copy testfds", stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as ex:  # Python 3 "as" syntax; error code <> 0
    print("--------error------")
    print(ex.cmd)
    # NOTE: ex.message does not exist in Python 3 -- removed.
    print(ex.returncode)
    print(ex.output)  # contains stdout and stderr merged together
This was perfect for me.
You will get the return code, stdout and stderr in a tuple.
from subprocess import Popen, PIPE

def console(cmd):
    """Run *cmd* in a shell; return (returncode, stdout, stderr).

    BUG FIX: stderr is now PIPEd as well -- the original left it at
    None, so the err element of the tuple was always None despite the
    documented contract.
    """
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    return (p.returncode, out, err)
For Example:
# Example usage (Python 3 print calls; the original used Python 2).
result = console('ls -l')
print('returncode: %s' % result[0])
print('output: %s' % result[1])
print('error: %s' % result[2])
The accepted answer is still good, just a few remarks on newer features. Since python 3.6, you can handle encoding directly in check_output, see documentation. This returns a string object now:
import subprocess

# Run the same listing command two ways.
listing = ["ls", "-l"]

# Python 3.6+: encoding= makes check_output return str, not bytes.
out = subprocess.check_output(listing, encoding="utf-8")

# Python 3.7+: capture_output=True does the Popen/PIPE plumbing for us.
p2 = subprocess.run(listing, capture_output=True, encoding="utf-8")
p2.stdout
I wrote a little function based on the other answers here:
def pexec(*args):
    """Run *args as one command; return its stdout (bytes) with
    trailing whitespace stripped."""
    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
    captured, _ = proc.communicate()
    return captured.rstrip()
Usage:
# Example usage: query Mercurial for id / branch / revision.
# NOTE(review): requires the "hg" executable on PATH.
changeset = pexec('hg','id','--id')
branch = pexec('hg','id','--branch')
revnum = pexec('hg','id','--num')
print('%s : %s (%s)' % (revnum, changeset, branch))
import os

# os.popen() returns a file-like object; readlines() yields a list of
# lines, matching the "one element in the list" description below.
# BUG FIX: the original bound a plain string (.read()) to the name
# "list", which both shadowed the builtin and was not actually a list.
pwd_lines = os.popen('pwd').readlines()
In this case you will only have one element in the list.
import subprocess

# BUG FIX: .decode() instead of str() -- str(b'...') embeds the bytes
# repr (including the b'' quotes and escaped newlines) in the result.
output = subprocess.Popen("ntpq -p", shell=True, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT).communicate()[0].decode()
This is a one-line solution.
The following captures stdout and stderr of the process in a single variable. It is Python 2 and 3 compatible:
from subprocess import check_output, CalledProcessError, STDOUT

# Capture stdout and stderr of a command in one string; "success"
# records whether the command exited cleanly. Python 2/3 compatible.
command = ["ls", "-l"]
try:
    output = check_output(command, stderr=STDOUT).decode()
except CalledProcessError as e:
    # On failure the captured text lives on the exception.
    output = e.output.decode()
    success = False
else:
    success = True
If your command is a string rather than an array, prefix this with:
# If the command is a single string, split it into an argv list first:
import shlex
command = shlex.split(command)
Use check_output method of subprocess module
import subprocess
# NOTE(review): '192.168.x.x' is a placeholder -- substitute a real
# address; check_output raises CalledProcessError if ping exits non-zero.
address = '192.168.x.x'
res = subprocess.check_output(['ping', address, '-c', '3'])
Finally parse the string
for line in res.splitlines():
Hope it helps, happy coding
For Python 3.5 I put together a function based on a previous answer. The logging may be removed, though it's nice to have.
import shlex
from subprocess import check_output, CalledProcessError, STDOUT
def cmdline(command):
    """Run *command* (a shell-style string); return its merged
    stdout/stderr as a string.  Failures are logged and captured,
    never raised."""
    log("cmdline:{}".format(command))
    cmdArr = shlex.split(command)
    try:
        output = check_output(cmdArr, stderr=STDOUT).decode()
        log("Success:{}".format(output))
    except CalledProcessError as e:
        # Non-zero exit: the captured text lives on the exception.
        output = e.output.decode()
        log("Fail:{}".format(output))
    except Exception as e:
        # Spawn failures (e.g. command not found) end up here.
        output = str(e)
        log("Fail:{}".format(e))
    return str(output)

def log(msg):
    """Print *msg* with a timestamp; also append it to LOG_FILE when
    that global is defined."""
    import datetime  # BUG FIX: was referenced but never imported
    msg = str(msg)
    d_date = datetime.datetime.now()
    now = str(d_date.strftime("%Y-%m-%d %H:%M:%S"))
    print(now + " " + msg)
    if "LOG_FILE" in globals():
        with open(LOG_FILE, "a") as myfile:
            myfile.write(now + " " + msg + "\n")

Categories

Resources