Encrypt a file with GPG in Python 3 with subprocess

How do I encrypt a file with subprocess so that the output is a string?
password = '%030x' % random.randrange(16**30)
encrypted_file = subprocess.getstatusoutput("echo " + password +
    " | gpg --passphrase-fd 0 < " + name_of_selected_file)
I want encrypted_file to be a string so I can use it as the encrypted file to upload in a POST request.
What is the best way to do this with the gnupg library?

You can use subprocess.Popen() to execute a gpg command like this:
import shlex
import random
from subprocess import Popen, PIPE
passphrase = '%030x' % random.randrange(16**30)
source_filename = '/tmp/somefile'
cmd = 'gpg --batch --symmetric --cipher-algo AES256 --passphrase-fd 0 --output - {}'.format(source_filename)
# error handling omitted
p = Popen(shlex.split(cmd), stdout=PIPE, stdin=PIPE, stderr=PIPE)
encrypted_data = p.communicate(passphrase.encode())[0]
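Since the question's goal is to upload the encrypted bytes in a POST request, encrypted_data can be posted directly; a minimal sketch, assuming the third-party requests library, with a made-up URL and field name:
import requests

# hypothetical endpoint; encrypted_data comes from the snippet above
resp = requests.post('https://example.com/upload',
                     files={'file': ('somefile.gpg', encrypted_data)})
resp.raise_for_status()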
# Decryption - should work for Python 2 & 3
import os
r, w = os.pipe() # pipe for sending passphrase from parent to child
try:
    os.set_inheritable(r, True)
except AttributeError:    # os.set_inheritable() is new in Python 3.4
    pass
cmd = 'gpg --batch --decrypt --passphrase-fd {}'.format(r)
p = Popen(shlex.split(cmd), stdout=PIPE, stdin=PIPE, stderr=PIPE, close_fds=False)
os.close(r) # close the read end in the parent; the child has its own copy
f = os.fdopen(w, 'w')
f.write(passphrase + '\n') # the trailing '\n' seems to be required on Python 2
f.close()
decrypted_data, stderr = p.communicate(encrypted_data)
# check that data was successfully roundtripped
assert open(source_filename).read() == decrypted_data.decode()
Or, decryption for Python 3 only:
import os
r, w = os.pipe()
cmd = 'gpg --batch --decrypt --passphrase-fd {}'.format(r)
p = Popen(shlex.split(cmd), stdout=PIPE, stdin=PIPE, stderr=PIPE, pass_fds=(r,))
os.close(r) # close the read end in the parent; the child has its own copy
open(w, 'w').write(passphrase)
decrypted_data, stderr = p.communicate(encrypted_data)
# check that data was successfully roundtripped
assert open(source_filename).read() == decrypted_data.decode()
Now, I am not an expert on the security of this method; however, writing the passphrase directly to the child process's stdin with communicate() is better than echoing it on the command line, which would be visible to anybody who can run ps or equivalent.
Also, relying on shell-level I/O redirection in your command would require the shell=True argument to Popen, which may have other security implications (see the warning in the Popen() documentation).
I have assumed in my gpg command that you intend to use symmetric encryption (your example does not suggest otherwise). If the remote server needs to be able to decrypt the encrypted file content, how are you going to share the generated passphrase?
You might be better off looking at using public key crypto instead.
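For completeness, here is a minimal public-key sketch in the same style, assuming a recipient key (the hypothetical alice@example.com) is already in the local keyring:
import shlex
from subprocess import Popen, PIPE

source_filename = '/tmp/somefile'
# --recipient encrypts to a public key, so no passphrase has to be shared;
# you may also need --trust-model always if the key is not fully trusted yet
cmd = 'gpg --batch --encrypt --recipient alice@example.com --output - {}'.format(source_filename)
p = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
encrypted_data, stderr = p.communicate()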

Related

pass_fds alternative in Python 2.7

I am currently using Popen to call a Unix command which accepts multiple files as arguments, and instead of using real files I would like to pass the data from memory as a variable/file object. Actual files need to be specified on the command line because the command does not read them from stdin. I can pass one file to the command by using '/dev/fd/0' as an argument and sending the contents of the file to stdin via communicate(), but I am looking for a way to pass multiple files.
I believe I need to use file descriptors here in order to achieve this, and from looking around I can see Python 3+ has an option called pass_fds, but no such option exists in 2.7.
Is there any way to do this in Python 2.7? I guess you'd need to use os.pipe, perhaps?
Thanks
I am sure there's a much better way of doing this, but I managed to do what I needed:
from subprocess import PIPE, Popen
import os

fakefiles = []
fd2 = 10    # arbitrary starting fd number
fakefiles.append("""The entire
content of
file one
""")
fakefiles.append("content of file two")

def fd_file_list(fd, maxrange):
    fdlist = []
    for i in range(0, maxrange):
        fdlist.append('/dev/fd/' + str(fd))
        fd += 1
    return fdlist

def create_fds(fd, files):
    for content in files:
        r, w = os.pipe()
        w = os.fdopen(w, 'w')
        w.write(content)    # only safe while the content fits in the pipe buffer
        w.close()
        os.dup2(r, fd)      # pin the read end to a known, stable fd number
        fd += 1

fd_files = fd_file_list(fd2, len(fakefiles))
# Note: create_fds(...) is called here, so it runs in the parent before the fork
# and its return value (None) is what actually gets passed as preexec_fn. The
# duplicated fds are still inherited because close_fds defaults to False on
# Python 2; preexec_fn=lambda: create_fds(fd2, fakefiles) would run it in the child.
p2 = Popen(['/home/pi/myscript.sh'] + fd_files, stdin=PIPE, stdout=PIPE,
           stderr=PIPE, preexec_fn=create_fds(fd2, fakefiles))
out, err = p2.communicate()
print out
Where the content of /home/pi/myscript.sh is:
#!/bin/bash
((!$#)) && exit
for i; do
echo -e "\n\nfile is $i"
cat $i
done
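A slightly simpler sketch of the same idea, relying on Python 2's default close_fds=False so the pipe read ends are inherited without dup2. This is only safe while each content fits in the OS pipe buffer (typically 64 KB); a larger os.write() would block:
import os
from subprocess import Popen, PIPE

contents = ["The entire\ncontent of\nfile one\n", "content of file two"]
paths = []
for data in contents:
    r, w = os.pipe()
    os.write(w, data)    # fine for small data; a large write would block here
    os.close(w)          # gives the reader EOF once the data is drained
    paths.append('/dev/fd/%d' % r)

p = Popen(['/home/pi/myscript.sh'] + paths, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
print out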

Control stdin and stdout of a Ruby program in Python

First I should note: I'm a Python programmer with no knowledge of Ruby!
Now, I need to feed the stdin of a Ruby program, and capture the stdout of that script, from a Python program.
I tried this (the fourth solution); the code works in Python 2.7 but not in Python 3: the Python 3 version reads input but produces no output.
Now, I need a way to tie the Ruby program to either Python 2 or 3.
My attempt:
This code is written with the six module for cross-version compatibility.
Python code:
from subprocess import Popen, PIPE as pipe, STDOUT as out
import six

print('launching slave')
slave = Popen(['ruby', 'slave.rb'], stdin=pipe, stdout=pipe, stderr=out)
while True:
    if six.PY3:
        line = input('enter command: ') + '\n'
        line = line.encode('ascii')
    else:
        line = raw_input('enter command: ') + '\n'
    slave.stdin.write(line)
    res = []
    while True:
        if slave.poll() is not None:
            print('slave terminated')
            exit()
        line = slave.stdout.readline().decode().rstrip()
        print('line:', line)
        if line == '[exit]': break
        res.append(line)
    print('results:')
    print('\n'.join(res))
Ruby code:
while cmd = STDIN.gets
  cmd.chop!
  if cmd == "exit"
    break
  else
    print eval(cmd), "\n"
    print "[exit]\n"
    STDOUT.flush
  end
end
NOTE:
Any other way to do this is welcome too (socket programming, etc.).
I also wondered whether it would be better not to use a pipe for stdout and to use a file-like object instead (like tempfile or StringIO); note, though, that Popen needs a real file descriptor for stdout, so tempfile would work but StringIO would not.
It's because of bufsize. In Python 2.x the default value was 0 (unbuffered), and in Python 3.x it changed to -1 (use the system's default buffer size).
Specifying it explicitly will solve your problem:
slave = Popen(['ruby', 'slave.rb'], stdin=pipe, stdout=pipe, stderr=out, bufsize=0)
DEMO
Below is the code showing how I got it working with Ruby and Python 3.
Ruby Slave
# read command from standard input:
while cmd = STDIN.gets
  # remove the trailing newline:
  cmd.chop!
  # if command is "exit", terminate:
  if cmd == "exit"
    break
  else
    # else evaluate command, send result to standard output:
    print eval(cmd), "\n"
    print "[exit]\n"
    # flush stdout to avoid buffering issues:
    STDOUT.flush
  end
end
Python master
from subprocess import Popen, PIPE as pipe, STDOUT as out

print('Launching slave')
slave = Popen(['ruby', 'slave.rb'], stdin=pipe, stdout=pipe, stderr=out, bufsize=0)
while True:
    line = input('Enter command: ') + '\n'
    line = line.encode('ascii')
    slave.stdin.write(line)
    res = []
    while True:
        if slave.poll() is not None:
            print('Slave terminated')
            exit()
        line = slave.stdout.readline().decode().rstrip()
        if line == '[exit]': break
        res.append(line)
    print('results:')
    print('\n'.join(res))

Using Popen to send a variable to stdin and to send stdout to a variable

In a shell script, we have the following command:
/script1.pl < input_file | /script2.pl > output_file
I would like to replicate this pipeline in Python using the subprocess module. input_file is a large file, and I can't read the whole file at once. As such, I would like to pass each line (an input_string) into the pipe stream and get back a string variable (output_string), until the whole file has been streamed through.
The following is a first attempt:
process = subprocess.Popen("/script1.pl | /script2.pl", stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE, shell=True)
process.stdin.write(input_string)
output_string = process.communicate()[0]
However, calling process.communicate()[0] closes the stream; I would like to keep the stream open for future writes. I have tried using process.stdout.readline() instead, but the program hangs.
To emulate the /script1.pl < input_file | /script2.pl > output_file shell command using the subprocess module in Python:
#!/usr/bin/env python
from subprocess import check_call

with open('input_file', 'rb') as input_file:
    with open('output_file', 'wb') as output_file:
        check_call("/script1.pl | /script2.pl", shell=True,
                   stdin=input_file, stdout=output_file)
You could write it without shell=True (though I don't see a reason to here), based on the "Replacing shell pipeline" example (17.1.4.2) in the docs:
#!/usr/bin/env python
from subprocess import Popen, PIPE

with open('input_file', 'rb') as input_file:
    script1 = Popen("/script1.pl", stdin=input_file, stdout=PIPE)
with open("output_file", "wb") as output_file:
    script2 = Popen("/script2.pl", stdin=script1.stdout, stdout=output_file)
script1.stdout.close()    # allow script1 to receive SIGPIPE if script2 exits
script2.wait()
script1.wait()
You could also use the plumbum module to get shell-like syntax in Python:
#!/usr/bin/env python
from plumbum import local
script1, script2 = local["/script1.pl"], local["/script2.pl"]
(script1 < "input_file" | script2 > "output_file")()
See also How do I use subprocess.Popen to connect multiple processes by pipes?
If you want to read/write line by line, then the answer depends on the concrete scripts you want to run. In general, it is easy to deadlock while sending/receiving input/output if you are not careful, e.g., due to buffering issues.
If input doesn't depend on output in your case, then a reliable cross-platform approach is to use a separate thread for each stream:
#!/usr/bin/env python
from subprocess import Popen, PIPE
from threading import Thread

def pump_input(pipe):
    try:
        for i in xrange(1000000000):    # generate large input
            print >>pipe, i
    finally:
        pipe.close()

p = Popen("/script1.pl | /script2.pl", shell=True, stdin=PIPE, stdout=PIPE,
          bufsize=1)
Thread(target=pump_input, args=[p.stdin]).start()
try:    # read output line by line as soon as the child flushes its stdout buffer
    for line in iter(p.stdout.readline, b''):
        print line.strip()[::-1]    # print reversed lines
finally:
    p.stdout.close()
    p.wait()

Not able to give inputs to a subprocess (which runs an adb shell command) after 100 iterations

I want to run a stress test for adb (Android Debug Bridge) shell. (adb shell is, in this respect, just a command-line tool provided by Android phones.)
I create a subprocess from Python and in this subprocess I execute the 'adb shell' command. There are some commands which have to be given to this subprocess, which I provide via its stdin.
Everything seems to be fine, but when I run a stress test, after around 100 iterations the command which I give to stdin no longer reaches the subprocess. If I run the commands in a separate terminal they work fine, so the problem is with this stdin.
Can anyone tell me what I am doing wrong? Below is the code sample:
class ADB():
    def __init__(self):
        self.proc = subprocess.Popen('adb shell', stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                     shell=True, bufsize=0)

    def provideAMcommand(self, testParam):
        try:
            cmd1 = "am startservice -n com.test.myapp/.ADBSupport -e \"" + "command" + "\" \"" + "test" + "\""
            cmd2 = " -e \"" + "param" + "\"" + " " + testParam
            print cmd1 + cmd2
            sys.stdout.flush()
            self.proc.stdin.write(cmd1 + cmd2 + "\n")
        except:
            raise Exception("Phone is not Connected to Desktop or ADB is not available \n")
If it works for the first few commands but blocks later, then you might have forgotten to read from self.proc.stdout, which (as the docs warn) may lead to the OS pipe buffer filling up and blocking the child process.
To discard the output, redirect it to os.devnull:
import os
from subprocess import Popen, PIPE, STDOUT
DEVNULL = open(os.devnull, 'wb')
# ...
self.proc = Popen(['adb', 'shell'], stdin=PIPE, stdout=DEVNULL, stderr=STDOUT)
# ...
self.proc.stdin.write(cmd1 + cmd2 + "\n")
self.proc.stdin.flush()
There is the pexpect module, which might be a better tool for a dialog-based interaction (if you want to both read and write intermittently).
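A minimal pexpect sketch of that idea; the '$ ' prompt pattern and the am arguments here are assumptions, not tested against a real device:
import pexpect

child = pexpect.spawn('adb shell')
child.expect(r'\$ ')    # wait for the shell prompt (assumed to end in '$ ')
child.sendline('am startservice -n com.test.myapp/.ADBSupport -e command test')
child.expect(r'\$ ')    # the next prompt means the command has finished
print child.before      # everything the command printed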
In provideAMcommand you are writing to and flushing the stdout of your main process. That will not send anything to the stdin of the child process you created with Popen. The following code creates a new bash child process, a bit like the code in your __init__:
import subprocess as sp
cproc = sp.Popen("bash", stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
Now, the easiest way to communicate with that child process is the following:
#Send command 'ls' to bash.
out, err = cproc.communicate("ls")
This will send the text "ls" and EOF to bash (equal to running a bash script with only the text "ls" in it). Bash will execute the ls command and then quit. Anything that bash or ls write to stdout and stderr will end up in the variables out and err respectively. I have not used the adb shell, but I guess it behaves like bash in this regard.
If you just want your child process to print to the terminal, don't specify the stdout and stderr arguments to Popen.
You can check the exit code of the child, and raise an exception if it is non-zero (indicating an error):
if cproc.returncode != 0:
    raise Exception("Child process returned non-zero exit code")

Interacting with bash from python

I've been playing around with Python's subprocess module and I wanted to have an "interactive session" with bash from Python. I want to be able to read bash output and write commands from Python, just like I do in a terminal emulator. I guess a code example explains it better:
>>> proc = subprocess.Popen(['/bin/bash'])
>>> proc.communicate()
('user@machine:~/','')
>>> proc.communicate('ls\n')
('file1 file2 file3','')
(Obviously, it doesn't work that way.) Is something like this possible, and how?
Thanks a lot!
Try with this example:
import subprocess

proc = subprocess.Popen(['/bin/bash'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = proc.communicate('ls -lash')
print stdout
You should read more about stdin, stdout and stderr. This looks like a good read: http://www.doughellmann.com/PyMOTW/subprocess/
EDIT:
Another example:
>>> process = subprocess.Popen(['/bin/bash'], shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
>>> process.stdin.write('echo it works!\n')
>>> process.stdout.readline()
'it works!\n'
>>> process.stdin.write('date\n')
>>> process.stdout.readline()
'wto, 13 mar 2012, 17:25:35 CET\n'
>>>
See the example in my other answer: https://stackoverflow.com/a/43012138/3555925
You can find more details in that answer.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import select
import termios
import tty
import pty
from subprocess import Popen

command = 'bash'
# command = 'docker run -it --rm centos /bin/bash'.split()

# save original tty setting then set it to raw mode
old_tty = termios.tcgetattr(sys.stdin)
tty.setraw(sys.stdin.fileno())

# open pseudo-terminal to interact with subprocess
master_fd, slave_fd = pty.openpty()

# use os.setsid() to make it run in a new process group, or bash job control will not be enabled
p = Popen(command,
          preexec_fn=os.setsid,
          stdin=slave_fd,
          stdout=slave_fd,
          stderr=slave_fd,
          universal_newlines=True)

while p.poll() is None:
    r, w, e = select.select([sys.stdin, master_fd], [], [])
    if sys.stdin in r:
        d = os.read(sys.stdin.fileno(), 10240)
        os.write(master_fd, d)
    elif master_fd in r:
        o = os.read(master_fd, 10240)
        if o:
            os.write(sys.stdout.fileno(), o)

# restore tty settings back
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
This should be what you want
import subprocess
import threading

p = subprocess.Popen(["bash"], stderr=subprocess.PIPE, shell=False,
                     stdin=subprocess.PIPE, stdout=subprocess.PIPE)
exit = False

def read_stdout():
    while not exit:
        msg = p.stdout.readline()
        print("stdout: ", msg.decode())

def read_stderro():
    while not exit:
        msg = p.stderr.readline()
        print("stderr: ", msg.decode())

threading.Thread(target=read_stdout).start()
threading.Thread(target=read_stderro).start()

while not exit:
    res = input(">")
    p.stdin.write((res + '\n').encode())
    p.stdin.flush()
Test result:
>ls
>stdout: 1.py
stdout: 2.py
ssss
>stderr: bash: line 2: ssss: command not found
An interactive bash process expects to be interacting with a tty. To create a pseudo-terminal, use os.openpty(). It returns master and slave file descriptors: pass slave_fd as the child's stdin, stdout, and stderr, then write to and read from master_fd to interact with the process. Note that if you're doing even mildly complex interaction, you'll also want to use the select module to make sure that you don't deadlock.
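A minimal sketch of that approach (the bash -i invocation and the echo command are just illustrations):
import os
import select
import subprocess

master_fd, slave_fd = os.openpty()
p = subprocess.Popen(['bash', '-i'],
                     preexec_fn=os.setsid,    # give bash its own session, for job control
                     stdin=slave_fd, stdout=slave_fd, stderr=slave_fd)
os.close(slave_fd)                      # the parent only needs the master end
os.write(master_fd, b'echo hello\n')    # "type" a command into the shell
r, _, _ = select.select([master_fd], [], [], 1.0)    # timeout so we never block forever
if r:
    print(os.read(master_fd, 10240))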
I wrote a module to facilitate interaction between a *nix shell and Python.
import subprocess

_DEBUG_MODE = False    # assumed module-level flag; see the linked file for the real definition

def execute(cmd):
    if not _DEBUG_MODE:
        # use bash; the default is sh
        print 'Output of command ' + cmd + ' :'
        subprocess.call(cmd, shell=True, executable='/bin/bash')
        print ''
    else:
        print 'The command is ' + cmd
        print ''
Check out the whole thing on GitHub: https://github.com/jerryzhujian9/ez.py/blob/master/ez/easyshell.py
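A quick usage sketch of the helper above:
execute('ls -l | head -3')    # pipes work because the command runs under /bin/bash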

Categories

Resources