How is Python blocking signals while os.system("sleep...")?

When I run this Python script with os.system on Ubuntu 12.04:
import os, signal
signal.signal(signal.SIGABRT, lambda *args: os.write(2, 'HANDLER\n'))
print 'status=%r' % os.system('sleep 5')
and then send SIGABRT to the script process many times within the 5 seconds, I get the following output:
status=0
HANDLER
This indicates that the signal delivery was blocked until sleep 5 exited, and then only a single signal was delivered.
However, with subprocess.call:
import os, signal, subprocess
signal.signal(signal.SIGABRT, lambda *args: os.write(2, 'HANDLER\n'))
print 'cstatus=%r' % subprocess.call('sleep 5', shell=True)
all individual signals are delivered early:
HANDLER
HANDLER
HANDLER
cstatus=0
To distinguish the magic in glibc from the magic in Python, I rewrote the Python script in C, so os.system became system(3):
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
static void handler(int signum) { (void)signum; write(2, "HANDLER\n", 8); }
int main(int argc, char **argv) {
    int got;
    struct sigaction sa;
    (void)argc; (void)argv;
    memset(&sa, 0, sizeof sa);
    sa.sa_handler = handler;
    if (0 != sigaction(SIGABRT, &sa, NULL)) return 3;
    got = system("sleep 5");
    return !printf("system=0x%x\n", got);
}
Signals got delivered early:
HANDLER
HANDLER
HANDLER
system=0x0
So I inferred that the magic is in Python 2.7, not in eglibc. But where is the magic? Based on the strace output and looking at the posix_system function in Modules/posixmodule.c, I couldn't figure out how Python blocks the signal until os.system returns.
Relevant code from Modules/posixmodule.c:
static PyObject *posix_system(PyObject *self, PyObject *args) {
    char *command;
    long sts;
    if (!PyArg_ParseTuple(args, "s:system", &command)) return NULL;
    Py_BEGIN_ALLOW_THREADS
    sts = system(command);
    Py_END_ALLOW_THREADS
    return PyInt_FromLong(sts);
}
Maybe the magic is in Py_BEGIN_ALLOW_THREADS?
Do I understand correctly that it's impossible for my Python signal handler (set up by signal.signal) to execute until os.system returns?
Is it because signal handlers are blocked (on the Python level, not on the OS level) until Py_END_ALLOW_THREADS returns?
Here is the strace output of the Python code with os.system: http://pastebin.com/Wjn9KBye

Maybe the magic is in Py_BEGIN_ALLOW_THREADS?
The magic is mostly in system itself. system cannot return EINTR, so the libc implementation goes to some lengths to resume wait'ing on the child process whenever that wait is interrupted. That means that with os.system, control never returns to Python until the underlying system() call completes, and so Python's signal-handling machinery isn't invoked in a timely way.
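For comparison, here is a minimal sketch of that retry behavior. This is not glibc's actual implementation (it omits the SIGINT/SIGQUIT/SIGCHLD handling that the real system() does), and the name system_like is purely illustrative:
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Sketch only: the EINTR-retry loop that keeps a system()-style call
 * from returning while the child is still running. */
static int system_like(const char *command)
{
    pid_t child = fork();
    if (child == (pid_t)-1)
        return -1;
    if (child == 0) {
        execl("/bin/sh", "sh", "-c", command, (char *)NULL);
        _exit(127);                 /* only reached if exec failed */
    }
    int status;
    while (waitpid(child, &status, 0) == (pid_t)-1) {
        if (errno != EINTR)
            return -1;              /* real error: give up */
        /* EINTR: a signal handler ran in *this* process; keep waiting,
         * so the caller never observes the interruption. */
    }
    return status;
}

int main(void)
{
    printf("status=0x%x\n", system_like("sleep 5"));
    return 0;
}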
subprocess.call, however, is essentially doing this:
# Compare subprocess.py:Popen/_eintr_retry_call(os.waitpid, self.pid, 0)
while True:
    try:
        return os.waitpid(the_child_pid, 0)
    except OSError, e:
        if e.errno == errno.EINTR:  # signal.signal() handler already invoked
            continue
        raise
Here control does return to Python when the underlying wait is interrupted. The OSError/EINTR prompts the interpreter to check whether any signals have been tripped and, if so, to invoke the user-supplied handler registered for that signal. (That is how the interpreter adapts the OS's signal semantics: the C-level handler merely sets a flag, the flag is checked between "atomic" Python operations, and the user's Python code is invoked then if appropriate. It also explains why the os.system example printed HANDLER only once: every delivery before the check just sets the same flag, so they collapse into a single Python-level invocation.)
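A minimal C sketch of that set-a-flag-now, act-later pattern (illustrative only; CPython's real machinery lives in Modules/signalmodule.c and the evaluation loop):
#include <signal.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t abrt_pending = 0;

static void low_level_handler(int signum)
{
    (void)signum;
    abrt_pending = 1;       /* only record the fact; real work happens later */
}

int main(void)
{
    struct sigaction sa;
    memset(&sa, 0, sizeof sa);
    sa.sa_handler = low_level_handler;
    sigaction(SIGABRT, &sa, NULL);

    sleep(5);               /* stand-in for blocking work such as system("sleep 5") */

    if (abrt_pending) {     /* checked at a "safe point", like between bytecodes */
        abrt_pending = 0;
        write(2, "HANDLER\n", 8);
    }
    return 0;
}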

Related

How can I make a linux background process (in c) that launches a Python script

I made a Linux background process (in C++) that monitors a directory and attempts to launch a Python script if a certain file appears in that directory. My issue is that the child process responsible for launching the Python script exits immediately after the execvp function is called, and I can't understand why. All of the necessary files are under root's ownership. Here is my code, if it helps. Thank you in advance for any pointers! I have marked in the code where the error occurs, and I have also included the Python script that is being called.
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
using namespace std;
char* arguments[3];
FILE* fd;
const char* logFilePath = "/home/BluetoothProject/Logs/fileMonitorLogs.txt";
char* rfcommPath = (char*)"/home/BluetoothProject/RFCOMMOut.py";
void logToFile(const char*);
void doWork();
void logToFile(const char* str) {
    fd = fopen(logFilePath, "a");
    fprintf(fd, "%s\n", str);
    fclose(fd);
}

int main() {
    arguments[0] = (char*)"python";
    arguments[1] = rfcommPath;
    arguments[2] = NULL;
    pid_t pid = fork();
    if(pid < 0) {
        printf("Fork failed");
        exit(1);
    } else if(pid > 0) {
        exit(EXIT_SUCCESS);
    }
    umask(0);
    pid_t sid = setsid();
    if(sid < 0) {
        logToFile("setsid() didn't work.");
        exit(1);
    }
    if ((chdir("/")) < 0) {
        logToFile("chdir() didn't work.");
        exit(EXIT_FAILURE);
    }
    close(STDIN_FILENO);
    close(STDOUT_FILENO);
    close(STDERR_FILENO);
    doWork();
}

void doWork() {
    pid_t pid = fork();
    if(pid < 0) {
        logToFile("doWork() fork didn't work.");
    } else if(pid > 0) {
        int status = 0;
        waitpid(pid, &status, 0);
        if(WEXITSTATUS(status) == 1) {
            logToFile("Child process exited with an error.");
        }
    } else {
        int error = execvp(arguments[0], arguments); //Here is where the error is
        if(error == -1) {
            logToFile("execvp() failed.");
        }
        exit(1);
    }
}
Python script (AKA RFCOMMOut.py)
import RPi.GPIO as gpio
import serial

led_state = 0
led_pin = 11
gpio.setmode(gpio.BOARD)
gpio.setwarnings(False)
gpio.setup(led_pin, gpio.OUT)

try:
    ser = serial.Serial(port = '/dev/rfcomm0',
                        baudrate = 9600,
                        parity = serial.PARITY_NONE,
                        stopbits = serial.STOPBITS_ONE,
                        bytesize = serial.EIGHTBITS)
except IOException as e:
    logFile = open("/home/BluetoothProject/Logs/fileMonitorLogs.txt", "a")
    logFile.write("(First error handler) There was an exception:\n")
    logFile.write(str(e))
    logFile.write("\n")
    logFile.close()
#gpio.output

def process_input(input):
    global led_state
    if input == "I have been sent.\n":
        if led_state == 1:
            led_state = 0
            gpio.output(led_pin, led_state)
        else:
            led_state = 1
            gpio.output(led_pin, led_state)

while True:
    try:
        transmission = ser.readline()
        process_input(transmission)
    except IOError as e:
        logFile = open("/home/BluetoothProject/Logs/fileMonitorLogs.txt", "a")
        logFile.write("(second error handler) There was an exception:\n")
        logFile.write(str(e))
        logFile.write("\n")
        logFile.close()
        break

led_state = 0
gpio.output(led_pin, led_state)
gpio.cleanup()
print("End of program\n")
The question is a little unclear, so I'll try to take a few different educated guesses at what the problem is and address each one individually.
TL;DR: Remove close(STDOUT_FILENO) and close(STDERR_FILENO) to get more debugging information which will hopefully point you in the right direction.
execvp(3) is returning -1
According to the execvp(3) documentation, execvp(3) sets errno when it fails. In order to understand why it is failing, your program will need to output the value of errno somewhere; perhaps stdout, stderr, or your log file. A convenient way to do this is to use perror(3). For example:
#include <stdio.h>
...
void doWork() {
    ...
    } else {
        int error = execvp(arguments[0], arguments);
        if (error == -1) {
            perror("execvp() failed");
        }
    }
    ...
}
Without knowing what that errno value is, it will be difficult to identify why execvp(3) is failing.
execvp(3) is succeeding, but my Python program doesn't appear to run
execvp(3) succeeding means that the Python interpreter has successfully been invoked (assuming that there is no program in your PATH that is named "python", but is not actually a Python interpreter). If your program doesn't appear to be running, that means Python is having difficulty loading your program. To my knowledge, Python will always output relevant error messages in this situation to stderr; for example, if Python cannot find your program, it will output "No such file or directory" to stderr.
However, it looks like your C program is calling close(STDERR_FILENO) before calling doWork(). According to fork(2), child processes inherit copies of their parent's set of open file descriptors. This means that calling close(STDERR_FILENO) before forking will result in the child process not having an open stderr file descriptor. If Python is having any errors executing your program, you'll never know, since Python is trying to notify you through a file descriptor that doesn't exist. If execvp(3) is succeeding and the Python program appears to not run at all, then I recommend you remove close(STDERR_FILENO) from your C program and run everything again. Without seeing the error message output by Python, it will be difficult to identify why it is failing to run the Python program.
As an aside, I recommend against explicitly closing stdin, stdout, and stderr. According to stdin(3), the standard streams are closed by a call to exit(3) and by normal program termination.
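Not part of the original answer, but as a hedged sketch of one common alternative: keep the descriptors valid by redirecting them rather than closing them. The helper name and the idea of reusing the daemon's log file are hypothetical here:
#include <fcntl.h>
#include <unistd.h>

/* Sketch: point stdin at /dev/null and stdout/stderr at a log file,
 * so fds 0-2 stay valid for the child (Python) process. */
void redirect_standard_streams(const char *log_path)
{
    int devnull = open("/dev/null", O_RDONLY);
    int log = open(log_path, O_WRONLY | O_CREAT | O_APPEND, 0644);

    if (devnull != -1) {
        dup2(devnull, STDIN_FILENO);
        close(devnull);
    }
    if (log != -1) {
        dup2(log, STDOUT_FILENO);
        dup2(log, STDERR_FILENO);
        close(log);
    }
}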
execvp(3) is succeeding, my Python program is running, but my Python program exits before it does any useful work
In this case, I'm not sure what the problem might be, since I'm not very familiar with Raspberry Pi. But I think you'll have an easier time debugging if you don't close the standard streams before running the Python program.
Hope this helps.

How to properly capture output of process using pwntools

I'm currently confused about how to use the pwntools library for Python 3 to exploit programs, mainly sending input into a vulnerable program.
This is my current Python script:
from pwn import *
def executeVuln():
    vulnBin = process("./buf2", stdin=PIPE, stdout=PIPE)
    vulnBin.sendlineafter(': ', 'A'*90)
    output = vulnBin.recvline(timeout=5)
    print(output)
executeVuln()
The program I'm trying to exploit is below. This isn't about how to exploit the program; it's more about using the script to automate it properly.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#define BUFSIZE 176
#define FLAGSIZE 64
void flag(unsigned int arg1, unsigned int arg2) {
    char buf[FLAGSIZE];
    FILE *f = fopen("flag.txt","r");
    if (f == NULL) {
        printf("Flag File is Missing. Problem is Misconfigured, please contact an Admin if you are running this on the shell server.\n");
        exit(0);
    }
    fgets(buf,FLAGSIZE,f);
    if (arg1 != 0xDEADBEEF)
        return;
    if (arg2 != 0xC0DED00D)
        return;
    printf(buf);
}

void vuln(){
    char buf[BUFSIZE];
    gets(buf);
    puts(buf);
}

int main(int argc, char **argv){
    setvbuf(stdout, NULL, _IONBF, 0);
    gid_t gid = getegid();
    setresgid(gid, gid, gid);
    puts("Please enter your string: ");
    vuln();
    return 0;
}
The process is opened fine.
sendlineafter blocks until it matches the prompt and sends the line, so if the prompt didn't match it would wait indefinitely. However, it returns fine, so the input should have been sent.
output should receive 90 A's from recvline, because puts(buf) echoes the input string back.
However, all that is returned is b'', which seems to indicate that the vulnerable program isn't receiving the input and is returning an empty string.
Anyone know what's causing this?
With the programs above, I get print(output) showing b'\n' (not b''); here is the explanation.
The puts() call appends a newline, and that newline is not consumed by the sendlineafter() call; the stray newline is then read by the following recvline(), which prints b'\n'.
Why isn't the newline read by sendlineafter()? Because sendlineafter() is just a combination of recvuntil() and sendline(), and recvuntil() only reads up to the delimiter, leaving whatever follows in the buffer (see the pwntools docs).
So the solution is to have sendlineafter() consume the newline as well, as below (or to call recvline() twice):
from pwn import *
def executeVuln():
    vulnBin = process("./buf2", stdin=PIPE, stdout=PIPE)
    vulnBin.sendlineafter(b': \n', b'A'*90)
    output = vulnBin.recvline(timeout=5)
    print(output)
executeVuln()
Output:
[+] Starting local process './buf2': pid 3493
[*] Process './buf2' stopped with exit code 0 (pid 3493)
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n'
Note:
I added the strings as bytes in sendlineafter() to remove the below BytesWarning.
program.py:5: BytesWarning: Text is not bytes; assuming ASCII, no guarantees. See https://docs.pwntools.com/#bytes
vulnBin.sendlineafter(': \n','A'*90)

After an exec, signal SIGCHLD is not SIG_IGN even though it was SIG_IGN before

This is my program popen_test.cpp:
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char* argv[])
{
    signal(SIGCHLD, SIG_IGN);
    sigset_t sset;
    sigaddset(&sset, SIGCHLD);
    sigprocmask(SIG_UNBLOCK, &sset, &sset);
    char command[128] = {0};
    snprintf(command, sizeof(command), "python popen_test.py");
    FILE* file;
    file = popen(command, "r");
    if ( !file )
    {
        printf("command: %s is error\n", command);
        return -1;
    }
    char result[256]={0};
    int len = fread(result, sizeof(char), 256, file);
    printf("result is :%s\n", result);
    int ret = pclose(file);
    if (ret == -1)
    {
        printf("pclose error:%d, errno:%d, str_err:%s\n", ret, errno, strerror(errno));
        return -1;
    }
}
The script is:
import os
import signal
import subprocess
if __name__ == "__main__":
    test = signal.getsignal(signal.SIGCHLD)
    if test == signal.SIG_DFL:
        print "sig_dfl"
    if test == signal.SIG_IGN:
        print "sig_ign"
    print test
popen() forks a child process, and the child calls exec to run python popen_test.py. I know that when exec is called, a SIGCHLD disposition of SIG_IGN may or may not be reset to SIG_DFL. Why is the result sig_dfl and not sig_ign?
You are not respecting the restrict qualifiers on the arguments to sigprocmask(). Your compiler should be complaining about using the same argument twice.
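For instance, a corrected setup might look like this (an illustrative sketch, not code from the question); it initializes the set with sigemptyset() and passes distinct new/old sets so the restrict contract is honored:
sigset_t sset, oldset;
sigemptyset(&sset);                        /* start from a known-empty set */
sigaddset(&sset, SIGCHLD);
sigprocmask(SIG_UNBLOCK, &sset, &oldset);  /* distinct arguments honor restrict */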
The POSIX specification for execv() documents:
Signals set to the default action (SIG_DFL) in the calling process image shall be set to the default action in the new process image. Except for SIGCHLD, signals set to be ignored (SIG_IGN) by the calling process image shall be set to be ignored by the new process image.
If the SIGCHLD signal is set to be ignored by the calling process image, it is unspecified whether the SIGCHLD signal is set to be ignored or to the default action in the new process image.
The OP comments:
But 《Advanced Programming in the UNIX Environment》 says that if a signal is set to be ignored by the calling process image, the signal is also ignored in the new process?
The difference is that APUE is dealing with UNIX; UNIX is not actually POSIX (though the two are close). POSIX says that a platform can be POSIX-compliant and still reset SIGCHLD to SIG_DFL even if it was SIG_IGN in the original process. Your code appears to demonstrate that your platform is one of those for which POSIX grants the SIGCHLD exemption.
Out of curiosity, which platform are you using?

subprocess.popen (python) in C

As the question title says, I am looking for the equivalent of Python's subprocess.Popen in C:
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, preexec_fn=os.setsid)
print p.pid
I am trying to spawn a process and then get the process ID.
`cmd` is a C program that is spawned.
In C, I believe getpid() does the job.
But after I spawn the process, I don't know how to get the PID of that spawned process.
void run_apps(pid) {
printf("process ID is %d\n", (int) getpid());
}
That is obviously giving the current process ID.
In C, you get the maximum number of options by first calling fork() to create a new process (the eventual subprocess), then one of the exec*() family of functions to execute the subprocess. Both the original process and the new process will run concurrently, so you can exchange (read and/or write data) via pipes or socket pairs. Finally, use e.g. waitpid() in a loop to wait for the new process to exit, and "reap" its exit status. For example:
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <errno.h>
int main(void)
{
    pid_t child, p;
    int status;

    /*
     * Prepare pipes et cetera first.
     */

    /* Fork to create the subprocess. */
    child = fork();
    if (child == (pid_t)-1) {
        /* Cannot fork(); usually out of resources (user limits).
         * see errno for details. With <string.h>, you can use
         * strerror(errno) to obtain the error string itself. */
        return 1;
    } else
    if (!child) {
        /* This is the child process itself.
         * Do whatever cleanup is necessary, then
         * execute the subprocess command. */
        execlp("/bin/ls", "ls", "-lA", NULL);
        /* This is only reached if the exec failed;
         * again, see errno for reason.
         * Always have the child process exit! */
        return 127;
    }

    /* This is only run by the parent process
     * (because the child always exits within the
     * else if body above).
     *
     * The subprocess PID is 'child'.
     */

    /* Wait for the child process to exit. */
    do {
        status = 0;
        p = waitpid(child, &status, 0);
        if (p == (pid_t)-1 && errno != EINTR)
            break; /* Error */
    } while (p != child);

    if (p != child) {
        /* Child process was lost.
         * If (p == (pid_t)-1), errno describes the error.
         */
    } else
    if (WIFEXITED(status)) {
        /* Child process exited with WEXITSTATUS(status) status.
         * A status of 0 (or EXIT_SUCCESS) means success,
         * no errors occurred. Nonzero usually means an error,
         * but codes vary from binary to binary.
         */
    } else
    if (WIFSIGNALED(status)) {
        /* Child process died from WTERMSIG(status) signal.
         * If you include <string.h>, you can use
         * strsignal(WTERMSIG(status))
         * to obtain the name (string) of the terminating signal.
         */
    } else {
        /* Child process died from unknown causes.
         */
    }

    /* All done. */
    return 0;
}
Personally, I prefer to use socketpair() to create Unix domain stream or datagram sockets between processes I control, and pipes created via pipe() if the subprocess is just some random binary to be run. In all cases you can replace the standard input (STDIN_FILENO descriptor), standard output (STDOUT_FILENO descriptor), and standard error (STDERR_FILENO descriptor) with sockets or pipes, using the dup2() function. You can even access the pseudo-files under /proc/[child]/ from the parent to observe the state of the child process if you want.
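As a brief, illustrative sketch of that dup2() redirection (capturing a child's standard output through a pipe; /bin/ls is just a placeholder command):
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    int fds[2];
    pid_t child;
    char buffer[4096];
    ssize_t n;

    if (pipe(fds) == -1)
        return 1;

    child = fork();
    if (child == (pid_t)-1)
        return 1;

    if (!child) {
        /* Child: make the write end of the pipe its standard output. */
        dup2(fds[1], STDOUT_FILENO);
        close(fds[0]);
        close(fds[1]);
        execlp("/bin/ls", "ls", "-lA", (char *)NULL);
        _exit(127);                    /* only reached if exec failed */
    }

    /* Parent: read the child's output from the other end of the pipe. */
    close(fds[1]);
    while ((n = read(fds[0], buffer, sizeof buffer)) > 0)
        fwrite(buffer, 1, (size_t)n, stdout);
    close(fds[0]);

    waitpid(child, NULL, 0);
    return 0;
}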
Depending on how you need to communicate with the subprocess -- input/output from/to files? strings in memory? dynamically allocated buffer for output -- there are many variants. Usually code similar to above is used when precise control and/or full-duplex (both read and write) and/or asynchronous communications are needed.
You can do a search for "linux" "fork" "exec" in your favourite search engine for examples of varying quality.
If you want an easier solution, and you only need to capture the output of the command (supplying no input to the command, or perhaps supplying the input from a file), you could use some variant of
#include <unistd.h>
#include <stdio.h>
#include <errno.h>
int main(void)
{
    FILE *sub;
    pid_t subpid;
    int status;

    sub = popen("setsid /bin/sh -c 'echo $$ ; exec command args' </dev/null", "rb");
    if (!sub) {
        /* popen() failed. */
        return 1;
    }

    /* Read the first line from sub. It contains the PID for the command. */
    {
        char buffer[32], *line, dummy;
        int value;

        line = fgets(buffer, sizeof buffer, sub);
        if (!line) {
            /* setsid failed, or non-POSIXy system (Windows?) */
            pclose(sub);
            return 1;
        }
        if (sscanf(line, "%d%c", &value, &dummy) != 1 || value < 2) {
            /* Internal bug, or extra output? */
            pclose(sub);
            return 1;
        }
        /* subpid is also the session ID and the process group ID,
         * because it is the session leader. */
        subpid = value;
    }

    /* Read from sub using standard I/O, to capture command output. */

    /* After no more output to read from sub, reap the subprocess. */
    errno = 0;
    do {
        status = pclose(sub);
    } while (status == -1 && errno == EINTR);

    if (status) {
        /* Problem: sub exited with nonzero exit status 'status',
         * or if status == -1, some other error occurred. */
    } else {
        /* Sub exited with success (zero exit status). */
    }

    /* Done. */
    return 0;
}
In Linux, popen() uses the /bin/sh shell (as per POSIX.1 specification), and we can use the setsid command-line utility to create the new session. Within the command, the echo $$ is a sh command which outputs the shell PID, and exec CMD... replaces the shell with the command; thus we get the PID of the command even before the command gets executed.

How to prevent embedded python to exit() my process

I'm having trouble while running embedded Python. It turns out that I can't capture the SystemExit exception raised by sys.exit().
This is what I have so far:
$ cat call.c
#include <Python.h>
int main(int argc, char *argv[])
{
    Py_InitializeEx(0);
    PySys_SetArgv(argc-1, argv+1);
    if (PyRun_AnyFileEx(fopen(argv[1], "r"), argv[1], 1) != 0) {
        PyObject *exc = PyErr_Occurred();
        printf("terminated by %s\n",
               PyErr_GivenExceptionMatches(exc, PyExc_SystemExit) ?
               "exit()" : "exception");
    }
    Py_Finalize();
    return 0;
}
Also, my script is:
$ cat unittest-files/python-return-code.py
from sys import exit
exit(99)
Running it:
$ ./call unittest-files/python-return-code.py
$ echo $?
99
I must execute a file, not a command.
The PyRun_SimpleFileExFlags function (and every function built on it, including PyRun_AnyFileEx) handles exceptions itself: it exits the process on SystemExit and prints a traceback for any other exception. Use the PyRun_File* family of functions if you want to handle exceptions in the surrounding C code.
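For example, a minimal sketch of that approach (Python 2 C API to match the question; this is illustrative, not the poster's final code, and error handling is abbreviated):
#include <Python.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int rc = 0;
    Py_InitializeEx(0);
    PySys_SetArgv(argc - 1, argv + 1);

    FILE *fp = fopen(argv[1], "r");
    if (!fp) {
        Py_Finalize();
        return 2;
    }

    PyObject *main_module = PyImport_AddModule("__main__");   /* borrowed ref */
    PyObject *globals = PyModule_GetDict(main_module);        /* borrowed ref */

    /* PyRun_File reports exceptions to the caller instead of handling them. */
    PyObject *result = PyRun_File(fp, argv[1], Py_file_input, globals, globals);
    fclose(fp);

    if (result == NULL) {
        if (PyErr_ExceptionMatches(PyExc_SystemExit)) {
            printf("terminated by exit()\n");
            PyErr_Clear();          /* swallow it; this process stays alive */
        } else {
            printf("terminated by exception\n");
            PyErr_Print();
            rc = 1;
        }
    } else {
        Py_DECREF(result);
    }

    Py_Finalize();
    return rc;
}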
