I'm having trouble running embedded Python: I can't capture the SystemExit exception raised by sys.exit().
This is what I have so far:
$ cat call.c
#include <Python.h>
int main(int argc, char *argv[])
{
Py_InitializeEx(0);
PySys_SetArgv(argc-1, argv+1);
if (PyRun_AnyFileEx(fopen(argv[1], "r"), argv[1], 1) != 0) {
PyObject *exc = PyErr_Occurred();
printf("terminated by %s\n",
PyErr_GivenExceptionMatches(exc, PyExc_SystemExit) ?
"exit()" : "exception");
}
Py_Finalize();
return 0;
}
Also, my script is:
$ cat unittest-files/python-return-code.py
from sys import exit
exit(99)
Running it:
$ ./call unittest-files/python-return-code.py
$ echo $?
99
I must execute a file, not a command.
The PyRun_SimpleFileExFlags function (and every function built on it, including PyRun_AnyFileEx) handles exceptions itself: it exits the process on SystemExit and prints a traceback for anything else. Use the PyRun_File* family of functions instead if you want to handle exceptions in the surrounding C code.
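For example, here is a minimal sketch of the program above reworked around PyRun_File (untested; it assumes the Python 2 C API, matching the char**-based PySys_SetArgv already used, and runs the file in the __main__ module's namespace):
#include <Python.h>
#include <stdio.h>
int main(int argc, char *argv[])
{
    Py_InitializeEx(0);
    PySys_SetArgv(argc - 1, argv + 1);
    FILE *fp = fopen(argv[1], "r");
    if (fp == NULL)
        return 1;
    /* Borrowed references; no Py_DECREF needed for these two. */
    PyObject *main_module = PyImport_AddModule("__main__");
    PyObject *globals = PyModule_GetDict(main_module);
    /* Unlike PyRun_AnyFileEx, PyRun_File reports SystemExit to the caller
     * instead of calling exit() itself. */
    PyObject *result = PyRun_File(fp, argv[1], Py_file_input, globals, globals);
    fclose(fp);
    if (result == NULL) {
        PyObject *exc = PyErr_Occurred();
        printf("terminated by %s\n",
               PyErr_GivenExceptionMatches(exc, PyExc_SystemExit) ?
               "exit()" : "exception");
        PyErr_Clear();
    } else {
        Py_DECREF(result);
    }
    Py_Finalize();
    return 0;
}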
Related
I made a Linux background process (in C++) that monitors a directory and attempts to launch a Python script if a certain file appears in that directory. My issue is that the child process responsible for launching the Python script exits immediately after the execvp call, and I can't understand why. All of the necessary files are under root's ownership. Here is my code, if it helps; I have marked the line where the error occurs, and I have also included the Python script being called. Thank you in advance for any pointers!
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
using namespace std;
char* arguments[3];
FILE* fd;
const char* logFilePath = "/home/BluetoothProject/Logs/fileMonitorLogs.txt";
char* rfcommPath = (char*)"/home/BluetoothProject/RFCOMMOut.py";
void logToFile(const char*);
void doWork();
void logToFile(const char* str) {
fd = fopen(logFilePath, "a");
fprintf(fd, "%s\n", str);
fclose(fd);
}
int main() {
arguments[0] = (char*)"python";
arguments[1] = rfcommPath;
arguments[2] = NULL;
pid_t pid = fork();
if(pid < 0) {
printf("Fork failed");
exit(1);
} else if(pid > 0) {
exit(EXIT_SUCCESS);
}
umask(0);
pid_t sid = setsid();
if(sid < 0) {
logToFile("setsid() didn't work.");
exit(1);
}
if ((chdir("/")) < 0) {
logToFile("chdir() didn't work.");
exit(EXIT_FAILURE);
}
close(STDIN_FILENO);
close(STDOUT_FILENO);
close(STDERR_FILENO);
doWork();
}
void doWork() {
pid_t pid = fork();
if(pid < 0) {
logToFile("doWork() fork didn't work.");
} else if(pid > 0) {
int status = 0;
waitpid(pid, &status, 0);
if(WEXITSTATUS(status) == 1) {
logToFile("Child process exited with an error.");
}
} else {
int error = execvp(arguments[0], arguments); //Here is where the error is
if(error == -1) {
logToFile("execvp() failed.");
}
exit(1);
}
}
Python script (AKA RFCOMMOut.py)
import RPi.GPIO as gpio
import serial
led_state = 0
led_pin = 11
gpio.setmode(gpio.BOARD)
gpio.setwarnings(False)
gpio.setup(led_pin, gpio.OUT)
try:
ser = serial.Serial(port = '/dev/rfcomm0',
baudrate = 9600,
parity = serial.PARITY_NONE,
stopbits = serial.STOPBITS_ONE,
bytesize = serial.EIGHTBITS)
except IOError as e:
logFile = open("/home/BluetoothProject/Logs/fileMonitorLogs.txt", "a")
logFile.write("(First error handler) There was an exception:\n")
logFile.write(str(e))
logFile.write("\n")
logFile.close()
#gpio.output
def process_input(input):
global led_state
if input == "I have been sent.\n":
if led_state == 1:
led_state = 0
gpio.output(led_pin, led_state)
else:
led_state = 1
gpio.output(led_pin, led_state)
while True:
try:
transmission = ser.readline()
process_input(transmission)
except IOError as e:
logFile = open("/home/BluetoothProject/Logs/fileMonitorLogs.txt", "a")
logFile.write("(second error handler) There was an exception:\n")
logFile.write(str(e))
logFile.write("\n")
logFile.close()
break
led_state = 0
gpio.output(led_pin, led_state)
gpio.cleanup()
print("End of program\n")
The question is a little unclear, so I'll try to take a few different educated guesses at what the problem is and address each one individually.
TL;DR: Remove close(STDOUT_FILENO) and close(STDERR_FILENO) to get more debugging information which will hopefully point you in the right direction.
execvp(3) is returning -1
According to the execvp(3) documentation, execvp(3) sets errno when it fails. In order to understand why it is failing, your program will need to output the value of errno somewhere; perhaps stdout, stderr, or your log file. A convenient way to do this is to use perror(3). For example:
#include <stdio.h>
...
void doWork() {
...
} else {
int error = execvp(arguments[0], arguments);
if(error == -1) {
perror("execvp() failed");
}
}
...
}
Without knowing what that errno value is, it will be difficult to identify why execvp(3) is failing.
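One caveat about perror(3) in this particular program: the daemon has already closed stderr, so perror's output would go nowhere. A variant (a sketch that reuses the post's own log file path) records errno in the log file instead:
#include <errno.h>
#include <stdio.h>
#include <string.h>
/* Hypothetical helper: append the execvp failure reason to the same
 * log file the post already uses, since stderr has been closed and
 * perror()'s output would be lost. */
static void logErrno(const char *prefix)
{
    FILE *log = fopen("/home/BluetoothProject/Logs/fileMonitorLogs.txt", "a");
    if (log != NULL) {
        fprintf(log, "%s: %s (errno=%d)\n", prefix, strerror(errno), errno);
        fclose(log);
    }
}
In the child branch this would be called right after execvp returns, e.g. logErrno("execvp() failed"), followed by _exit(1).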
execvp(3) is succeeding, but my Python program doesn't appear to run
execvp(3) succeeding means that the Python interpreter has successfully been invoked (assuming that there is no program in your PATH that is named "python", but is not actually a Python interpreter). If your program doesn't appear to be running, that means Python is having difficulty loading your program. To my knowledge, Python will always output relevant error messages in this situation to stderr; for example, if Python cannot find your program, it will output "No such file or directory" to stderr.
However, it looks like your C program is calling close(STDERR_FILENO) before calling doWork(). According to fork(2), child processes inherit copies of their parent's set of open file descriptors. This means that calling close(STDERR_FILENO) before forking will result in the child process not having an open stderr file descriptor. If Python is having any errors executing your program, you'll never know, since Python is trying to notify you through a file descriptor that doesn't exist. If execvp(3) is succeeding and the Python program appears to not run at all, then I recommend you remove close(STDERR_FILENO) from your C program and run everything again. Without seeing the error message output by Python, it will be difficult to identify why it is failing to run the Python program.
As an aside, I recommend against explicitly closing stdin, stdout, and stderr. According to stdin(3), the standard streams are closed by a call to exit(3) and by normal program termination.
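If the goal is simply to detach the daemon from the terminal, one common alternative (a sketch under that assumption; it is not part of the original post) is to redirect the standard descriptors to /dev/null rather than closing them, so fds 0-2 stay valid for the child and for the exec'ed Python interpreter:
#include <fcntl.h>
#include <unistd.h>
/* Point stdin/stdout/stderr at /dev/null instead of closing them, so
 * anything that writes to fds 0-2 doesn't scribble on whatever file
 * happens to be opened next. */
static void redirect_std_to_devnull(void)
{
    int fd = open("/dev/null", O_RDWR);
    if (fd < 0)
        return;                 /* nothing we can do; leave streams alone */
    dup2(fd, STDIN_FILENO);
    dup2(fd, STDOUT_FILENO);
    dup2(fd, STDERR_FILENO);
    if (fd > STDERR_FILENO)
        close(fd);
}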
execvp(3) is succeeding, my Python program is running, but my Python program exits before it does any useful work
In this case, I'm not sure what the problem might be, since I'm not very familiar with Raspberry Pi. But I think you'll have an easier time debugging if you don't close the standard streams before running the Python program.
Hope this helps.
I'm currently confused about how to use the pwntools library for Python 3 to exploit programs, mainly about sending input into a vulnerable program.
This is my current python script.
from pwn import *
def executeVuln():
vulnBin = process("./buf2", stdin=PIPE, stdout=PIPE)
vulnBin.sendlineafter(': ','A'*90)
output = vulnBin.recvline(timeout=5)
print(output)
executeVuln()
The program I'm trying to exploit is below. This isn't about how to exploit the program; it's more about using the script to automate it properly.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#define BUFSIZE 176
#define FLAGSIZE 64
void flag(unsigned int arg1, unsigned int arg2) {
char buf[FLAGSIZE];
FILE *f = fopen("flag.txt","r");
if (f == NULL) {
printf("Flag File is Missing. Problem is Misconfigured, please contact an Admin if you are running this on the shell server.\n");
exit(0);
}
fgets(buf,FLAGSIZE,f);
if (arg1 != 0xDEADBEEF)
return;
if (arg2 != 0xC0DED00D)
return;
printf(buf);
}
void vuln(){
char buf[BUFSIZE];
gets(buf);
puts(buf);
}
int main(int argc, char **argv){
setvbuf(stdout, NULL, _IONBF, 0);
gid_t gid = getegid();
setresgid(gid, gid, gid);
puts("Please enter your string: ");
vuln();
return 0;
}
The process is opened fine.
sendlineafter() blocks until it sends the line, so if the delimiter never matched it would wait indefinitely. However, it runs fine, so the input should have been sent.
output should receive the 90 A's from recvline(), since puts(buf) echoes the input string back.
However, all that is returned is b'', which seems to indicate that the vulnerable program isn't receiving the input and is returning an empty string.
Anyone know what's causing this?
With the above programs, I'm getting print(output) as b'\n' (not b''), and here is the explanation.
puts() appends a newline character to its output, and that newline is not consumed by the sendlineafter() call; the stray newline is then what the following recvline() reads, which is why it prints b'\n'.
Why isn't the newline read by sendlineafter()? Because sendlineafter() is just a combination of recvuntil() and sendline(), and recvuntil() reads only up to the delimiter, leaving anything after it in the buffer (see the pwntools docs).
So the fix is to include the newline in the sendlineafter() delimiter, as below (or to call recvline() twice):
from pwn import *
def executeVuln():
vulnBin = process("./buf2", stdin=PIPE, stdout=PIPE)
vulnBin.sendlineafter(b': \n',b'A'*90)
output = vulnBin.recvline(timeout=5)
print(output)
executeVuln()
Output:
[+] Starting local process './buf2': pid 3493
[*] Process './buf2' stopped with exit code 0 (pid 3493)
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n'
Note:
I added the strings as bytes in sendlineafter() to remove the below BytesWarning.
program.py:5: BytesWarning: Text is not bytes; assuming ASCII, no guarantees. See https://docs.pwntools.com/#bytes
vulnBin.sendlineafter(': \n','A'*90)
I want to write a Python script that creates a subprocess, reads from its stdout, and writes to its stdin. What is written to stdin should depend on what has been read from stdout.
I've tried pretty much everything I could find about subprocess.Popen, but nothing worked out.
Basically, I want to write a script that makes the following C code print "success":
#include <stdio.h>
#include <stdlib.h>
int main()
{
int var, inp;
for (int i = 0; i < 100; i++) {
var = rand();
printf("Please type %d:\n", var); //random var. is printed
scanf("%d", &inp); //inp == var?
if (inp != var) {
printf("you failed miserably\n");
return 0;
}
}
printf("success\n");
return 0;
}
I'm failing at reading from stdout while still keeping the subprocess alive. The task seems so simple, but I can't find a simple solution.
Python code that I would expect to work:
from subprocess import *
def getNum(s): # "Please type 1234567:\t" -> "1234567"
return "".join([t for t in s if t.isdigit()])
p = Popen("./io", stdin=PIPE, stdout=PIPE) #io is the binary obtained from above c code
for i in range(100):
out = p.stdout.readline() #script deadlocks here
print( out )
inp = getNum(out)+"\n"#convert out into desired inp
p.stdin.write(inp)
print (p.communicate()[0]) #kill p and get last output
This approach might be a little naive, but I also don't understand why it doesn't work.
The Python program gets stuck waiting to receive something from the child's stdout. This can be due to stdout buffering: when stdout is connected to a pipe rather than a terminal, stdio typically switches to full buffering, so the prompt sits in the C program's buffer instead of reaching the pipe. There are probably a few ways to change this behavior, but one quick way to test it is to change the C executable to force an fflush right after it prints the randomly generated number. The C code would look like this:
#include <stdio.h>
#include <stdlib.h>
int main()
{
int var, inp;
for (int i = 0; i < 100; i++) {
var = rand();
printf("Please type %d:\n", var); //random var. is printed
// ADDED EXPLICIT stdout flush
fflush(stdout);
scanf("%d", &inp); //inp == var?
if (inp != var) {
printf("you failed miserably\n");
return 0;
}
}
printf("success\n");
return 0;
}
With that change, the original Python script is able to drive the whole interaction.
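An alternative to adding fflush() calls after every printf (a sketch, not part of the original answer) is to make stdout unbuffered once at startup with setvbuf; this is the same trick the buf2 program from the pwntools question above uses:
#include <stdio.h>
#include <stdlib.h>
int main(void)
{
    /* Disable stdout buffering once, so every printf reaches the
     * pipe immediately and no per-call fflush(stdout) is needed. */
    setvbuf(stdout, NULL, _IONBF, 0);
    int var, inp;
    for (int i = 0; i < 100; i++) {
        var = rand();
        printf("Please type %d:\n", var);
        scanf("%d", &inp);
        if (inp != var) {
            printf("you failed miserably\n");
            return 0;
        }
    }
    printf("success\n");
    return 0;
}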
When I run this Python script with os.system on Ubuntu 12.04:
import os, signal
signal.signal(signal.SIGABRT, lambda *args: os.write(2, 'HANDLER\n'))
print 'status=%r' % os.system('sleep 5')
, and then send SIGABRT to the script process many times within the 5 seconds, I get the following output:
status=0
HANDLER
This indicates that the signal delivery was blocked until sleep 5 exited, and then only a single signal was delivered.
However, with subprocess.call:
import os, signal, subprocess
signal.signal(signal.SIGABRT, lambda *args: os.write(2, 'HANDLER\n'))
print 'cstatus=%r' % subprocess.call('sleep 5', shell=True)
, all individual signals are delivered early:
HANDLER
HANDLER
HANDLER
cstatus=0
To distinguish the magic in glibc from the magic in Python, I rewrote the Python script in C, so os.system became system(3):
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
static void handler(int signum) { (void)signum; write(2, "HANDLER\n", 8); }
int main(int argc, char **argv) {
int got;
struct sigaction sa;
(void)argc; (void)argv;
memset(&sa, 0, sizeof sa);
sa.sa_handler = handler;
if (0 != sigaction(SIGABRT, &sa, NULL)) return 3;
got = system("sleep 5");
return !printf("system=0x%x\n", got);
}
Signals got delivered early:
HANDLER
HANDLER
HANDLER
system=0x0
So I inferred that the magic is in Python 2.7, not in eglibc. But where is the magic? Based on the strace output and looking at the posix_system function in Modules/posixmodule.c, I couldn't figure out how Python blocks the signal until os.system returns.
Relevant code from Modules/posixmodule.c:
static PyObject *posix_system(PyObject *self, PyObject *args) {
char *command;
long sts;
if (!PyArg_ParseTuple(args, "s:system", &command)) return NULL;
Py_BEGIN_ALLOW_THREADS
sts = system(command);
Py_END_ALLOW_THREADS
return PyInt_FromLong(sts);
}
Maybe the magic is in Py_BEGIN_ALLOW_THREADS?
Do I understand correctly that it's impossible for my Python signal handler (set up by signal.signal) to execute until os.system returns?
Is it because signal handlers are blocked (on the Python level, not on the OS level) until Py_END_ALLOW_THREADS returns?
Here is the strace output of the Python code with os.system: http://pastebin.com/Wjn9KBye
Maybe the magic is in Py_BEGIN_ALLOW_THREADS?
The magic is mostly in system itself. system(3) cannot return EINTR, so the libc implementation takes pains to resume waiting on the child process. That means that with os.system, control never returns to Python until the underlying system(3) call completes, and thus the Python signal-handling mechanics aren't invoked in a timely way.
subprocess.call, however, is essentially doing this:
# Compare subprocess.py:Popen/_eintr_retry_call(os.waitpid, self.pid, 0)
while True:
try:
return os.waitpid(the_child_pid, 0)
except OSError, e:
if e.errno == errno.EINTR: # signal.signal() handler already invoked
continue
raise
Here control does return to Python when the underlying wait is interrupted. The OSError/EINTR prompts Python to check whether any signals were tripped and, if so, to invoke the user-supplied handler associated with that signal. (And that's how the interpreter adapts the system's signal semantics: the C-level handler merely sets a flag, which is checked between "atomic" Python operations, and the user's code is invoked when appropriate.)
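To see the shape of that mechanism concretely, here is a small C sketch (an illustration of the pattern, not CPython's actual code): the signal handler only sets a flag, and an EINTR-retry loop around waitpid checks the flag between attempts, the way subprocess.call's wait loop lets Python's handler run. Sending SIGABRT while it waits prints HANDLER early, unlike the system(3) version above.
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
static volatile sig_atomic_t got_signal = 0;
/* C-level handler: just set a flag, as CPython's internal handler does. */
static void handler(int signum) { (void)signum; got_signal = 1; }
int main(void)
{
    struct sigaction sa;
    memset(&sa, 0, sizeof sa);
    sa.sa_handler = handler;        /* no SA_RESTART: waitpid sees EINTR */
    sigaction(SIGABRT, &sa, NULL);
    pid_t pid = fork();
    if (pid == 0) {
        execlp("sleep", "sleep", "5", (char *)NULL);
        _exit(127);
    }
    int status;
    for (;;) {                      /* compare _eintr_retry_call */
        if (waitpid(pid, &status, 0) >= 0)
            break;
        if (errno != EINTR)
            break;
        if (got_signal) {           /* check between "atomic" operations */
            got_signal = 0;
            write(2, "HANDLER\n", 8);
        }
    }
    printf("cstatus=0x%x\n", status);
    return 0;
}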
I want to call a Python script from C, passing some arguments that are needed in the script.
The script I want to use is mrsync, or multicast remote sync. I got this working from the command line by calling:
python mrsync.py -m /tmp/targets.list -s /tmp/sourcedata -t /tmp/targetdata
-m is the list containing the target ip-addresses.
-s is the directory that contains the files to be synced.
-t is the directory on the target machines where the files will be put.
So far I managed to run a Python script without parameters, by using the following C program:
Py_Initialize();
FILE* file = fopen("/tmp/myfile.py", "r");
PyRun_SimpleFile(file, "/tmp/myfile.py");
Py_Finalize();
This works fine. However, I can't find out how to pass these arguments to the PyRun_SimpleFile(..) call.
It seems like you're looking for an answer that uses the Python development API from Python.h. Here's an example that should work:
#My python script called mypy.py
import sys
if len(sys.argv) != 3:
sys.exit("Not enough args")
ca_one = str(sys.argv[1])
ca_two = str(sys.argv[2])
print "My command line args are " + ca_one + " and " + ca_two
And then the C code to pass these args:
//My code file
#include <stdio.h>
#include <python2.7/Python.h>
int main(void)
{
FILE* file;
int argc;
char * argv[3];
argc = 3;
argv[0] = "mypy.py";
argv[1] = "-m";
argv[2] = "/tmp/targets.list";
Py_SetProgramName(argv[0]);
Py_Initialize();
PySys_SetArgv(argc, argv);
file = fopen("mypy.py","r");
PyRun_SimpleFile(file, "mypy.py");
Py_Finalize();
return 0;
}
If you can pass the arguments into your C function this task becomes even easier:
int main(int argc, char *argv[])
{
FILE* file;
Py_SetProgramName(argv[0]);
Py_Initialize();
PySys_SetArgv(argc, argv);
file = fopen("mypy.py","r");
PyRun_SimpleFile(file, "mypy.py");
Py_Finalize();
return 0;
}
You can just pass those straight through. My examples only used two command-line args for the sake of time, but you can use the same concept for all six that you need to pass. And of course there are cleaner ways to capture the args on the Python side too, but that's just the basic idea.
Hope it helps!
You have two options.
1. Call
system("python mrsync.py -m /tmp/targets.list -s /tmp/sourcedata -t /tmp/targetdata")
in your C code.
2. Actually use the API that mrsync (hopefully) defines. This is more flexible, but much more complicated. The first step would be to work out how you would perform the above operation as a Python function call. If mrsync has been written nicely, there will be a function mrsync.sync (say) that you call as
mrsync.sync("/tmp/targets.list", "/tmp/sourcedata", "/tmp/targetdata")
Once you've worked out how to do that, you can call the function directly from the C code using the Python API.
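Assuming mrsync really does expose such a function, a sketch of option 2 with the C API might look like this (the module name and the sync(targets, source, target) signature are assumptions carried over from above, not mrsync's documented interface):
#include <python2.7/Python.h>
int main(void)
{
    Py_Initialize();
    /* Hypothetical: assumes an importable module "mrsync" providing
     * sync(targets_file, source_dir, target_dir). */
    PyObject *module = PyImport_ImportModule("mrsync");
    if (module == NULL) {
        PyErr_Print();
        Py_Finalize();
        return 1;
    }
    PyObject *result = PyObject_CallMethod(module, "sync", "(sss)",
                                           "/tmp/targets.list",
                                           "/tmp/sourcedata",
                                           "/tmp/targetdata");
    if (result == NULL)
        PyErr_Print();              /* show the Python exception, if any */
    else
        Py_DECREF(result);
    Py_DECREF(module);
    Py_Finalize();
    return 0;
}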