Python sys.stdin.buffer size detection - python

I'm trying to execute a Python script from a Qt application and to communicate with the script via standard input and output (as one would do via common Unix pipes). My calling code stub looks like this:
int main(int argc, char *argv[]) {
    QCoreApplication a(argc, argv);
    QProcess process;
    QTimer timer;

    QObject::connect(&process, &QProcess::readyReadStandardOutput, [&process]() {
        qDebug() << "<<o" << process.readAllStandardOutput();
    });
    QObject::connect(&process, &QProcess::readyReadStandardError, [&process]() {
        qDebug() << "<<e" << process.readAllStandardError();
    });
    QObject::connect(&process, &QProcess::started, [&process] {
        qDebug() << "Process name" << process.program() << process.processId();
    });
    QObject::connect(&timer, &QTimer::timeout, [&process]() {
        qDebug() << process.state();
        QByteArray ba("12345");
        qDebug() << ">>" << ba;
        process.write(ba);
        if (!process.waitForBytesWritten())
            qDebug() << process.errorString();
    });
    QObject::connect(&a, &QCoreApplication::aboutToQuit, [&]() {
        process.terminate();
        process.kill();
    });

    process.start("python3", {"main.py"});
    // process.start("cat", QStringList{});
    timer.start(2000);
    a.exec();

    process.terminate();
    process.kill();
    return 0;
}
And my Python script is shown below:
import sys, time

def process_data(data):
    size = len(data)
    if size % 2:
        print(f'Odd, {size}', data)
    else:
        print(f'Even, {size}', data)
    sys.stdout.flush()

if __name__ == '__main__':
    while True:
        data = sys.stdin.buffer.read(5)
        if len(data):
            process_data(data)
        else:
            print('.')
        time.sleep(0.02)
The thing is that I want my script to react to any incoming buffer, much like the cat command does. When I comment out the line that starts my script and uncomment the one that starts cat, each time I send a buffer I receive a reply, which is what I want. But when I call the Python script, I know of no way to detect the size of the incoming buffer. Explicitly passing a size to sys.stdin.buffer.read() lets me avoid waiting for an EOF, but I want to receive a buffer without knowing its size in advance. In Qt I would achieve such behavior by calling the readAll() method of a QIODevice. Is there a way of doing the same in Python?
I have tried calling sys.stdin.buffer.read() without any arguments, expecting it to behave like QIODevice::readAll() and produce a buffer with all the data read so far, but it produces nothing until it receives an EOF. I hope there is some method that yields the size of the received buffer, so that I could write:
size=stdin.buffer.bytesReceived()
data=stdin.buffer.read(size)
yet such a method seems to be missing.
Does anyone know of any solution to this problem?

The problem is solved by changing the sys.stdin.buffer.read line to:
data = sys.stdin.buffer.raw.read(20000)
This also works:
data = sys.stdin.buffer.read1(20000)
This answer was posted as edit 1 and edit 2 to the question Python sys.stdin.buffer size detection [solved] by the OP Kirill Didkovsky under CC BY-SA 4.0.
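For reference, here is a minimal sketch (not part of the OP's edits) of how the reading loop might look when rewritten around read1(), assuming the goal is still to answer each chunk the Qt side writes:

import sys

while True:
    # read1() performs at most one underlying read and returns as soon as
    # some data is available, up to the given maximum, instead of blocking
    # until EOF or until the full requested amount has arrived.
    data = sys.stdin.buffer.read1(20000)
    if not data:                 # an empty result means the writer closed the pipe (EOF)
        break
    print(f'received {len(data)} bytes:', data)
    sys.stdout.flush()           # make the reply visible to the Qt side immediately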

Related

How can I make a linux background process (in c) that launches a Python script

I made a Linux background process (in C++) that monitors a directory and attempts to launch a Python script if a certain file appears in that directory. My issue is that the child process responsible for launching the Python script exits immediately after the execvp function is called, and I can't understand why. All of the necessary files are under root's ownership. Here is my code, if it helps; thank you in advance for any pointers! I have marked the line in my code where the error occurs. I have also included the Python script that is to be called.
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>

using namespace std;

char* arguments[3];
FILE* fd;
const char* logFilePath = "/home/BluetoothProject/Logs/fileMonitorLogs.txt";
char* rfcommPath = (char*)"/home/BluetoothProject/RFCOMMOut.py";

void logToFile(const char*);
void doWork();

void logToFile(const char* str) {
    fd = fopen(logFilePath, "a");
    fprintf(fd, "%s\n", str);
    fclose(fd);
}

int main() {
    arguments[0] = (char*)"python";
    arguments[1] = rfcommPath;
    arguments[2] = NULL;

    pid_t pid = fork();
    if(pid < 0) {
        printf("Fork failed");
        exit(1);
    } else if(pid > 0) {
        exit(EXIT_SUCCESS);
    }

    umask(0);
    pid_t sid = setsid();
    if(sid < 0) {
        logToFile("setsid() didn't work.");
        exit(1);
    }
    if ((chdir("/")) < 0) {
        logToFile("chdir() didn't work.");
        exit(EXIT_FAILURE);
    }

    close(STDIN_FILENO);
    close(STDOUT_FILENO);
    close(STDERR_FILENO);

    doWork();
}

void doWork() {
    pid_t pid = fork();
    if(pid < 0) {
        logToFile("doWork() fork didn't work.");
    } else if(pid > 0) {
        int status = 0;
        waitpid(pid, &status, 0);
        if(WEXITSTATUS(status) == 1) {
            logToFile("Child process exited with an error.");
        }
    } else {
        int error = execvp(arguments[0], arguments); //Here is where the error is
        if(error == -1) {
            logToFile("execvp() failed.");
        }
        exit(1);
    }
}
Python script (AKA RFCOMMOut.py)
import RPi.GPIO as gpio
import serial

led_state = 0
led_pin = 11
gpio.setmode(gpio.BOARD)
gpio.setwarnings(False)
gpio.setup(led_pin, gpio.OUT)

try:
    ser = serial.Serial(port = '/dev/rfcomm0',
                        baudrate = 9600,
                        parity = serial.PARITY_NONE,
                        stopbits = serial.STOPBITS_ONE,
                        bytesize = serial.EIGHTBITS)
except IOException as e:
    logFile = open("/home/BluetoothProject/Logs/fileMonitorLogs.txt", "a")
    logFile.write("(First error handler) There was an exception:\n")
    logFile.write(str(e))
    logFile.write("\n")
    logFile.close()
    #gpio.output

def process_input(input):
    global led_state
    if input == "I have been sent.\n":
        if led_state == 1:
            led_state = 0
            gpio.output(led_pin, led_state)
        else:
            led_state = 1
            gpio.output(led_pin, led_state)

while True:
    try:
        transmission = ser.readline()
        process_input(transmission)
    except IOError as e:
        logFile = open("/home/BluetoothProject/Logs/fileMonitorLogs.txt", "a")
        logFile.write("(second error handler) There was an exception:\n")
        logFile.write(str(e))
        logFile.write("\n")
        logFile.close()
        break

led_state = 0
gpio.output(led_pin, led_state)
gpio.cleanup()
print("End of program\n")
The question is a little unclear, so I'll try to take a few different educated guesses at what the problem is and address each one individually.
TL;DR: Remove close(STDOUT_FILENO) and close(STDERR_FILENO) to get more debugging information which will hopefully point you in the right direction.
execvp(3) is returning -1
According to the execvp(3) documentation, execvp(3) sets errno when it fails. In order to understand why it is failing, your program will need to output the value of errno somewhere; perhaps stdout, stderr, or your log file. A convenient way to do this is to use perror(3). For example:
#include <stdio.h>

...

void doWork() {
    ...
    } else {
        int error = execvp(arguments[0], arguments);
        if(error == -1) {
            perror("execvp() failed");
        }
    }
    ...
}
Without knowing what that errno value is, it will be difficult to identify why execvp(3) is failing.
execvp(3) is succeeding, but my Python program doesn't appear to run
execvp(3) succeeding means that the Python interpreter has successfully been invoked (assuming that there is no program in your PATH that is named "python", but is not actually a Python interpreter). If your program doesn't appear to be running, that means Python is having difficulty loading your program. To my knowledge, Python will always output relevant error messages in this situation to stderr; for example, if Python cannot find your program, it will output "No such file or directory" to stderr.
However, it looks like your C program is calling close(STDERR_FILENO) before calling doWork(). According to fork(2), child processes inherit copies of their parent's set of open file descriptors. This means that calling close(STDERR_FILENO) before forking will result in the child process not having an open stderr file descriptor. If Python is having any errors executing your program, you'll never know, since Python is trying to notify you through a file descriptor that doesn't exist. If execvp(3) is succeeding and the Python program appears to not run at all, then I recommend you remove close(STDERR_FILENO) from your C program and run everything again. Without seeing the error message output by Python, it will be difficult to identify why it is failing to run the Python program.
As an aside, I recommend against explicitly closing stdin, stdout, and stderr. According to stdin(3), the standard streams are closed by a call to exit(3) and by normal program termination.
execvp(3) is succeeding, my Python program is running, but my Python program exits before it does any useful work
In this case, I'm not sure what the problem might be, since I'm not very familiar with Raspberry Pi. But I think you'll have an easier time debugging if you don't close the standard streams before running the Python program.
Hope this helps.
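One more debugging aid, offered as a hedged sketch rather than something from the original setup: if changing the C daemon is inconvenient, the Python script itself can re-point sys.stderr at the same log file as early as possible, so tracebacks raised after that point are recorded even though the parent closed the inherited standard descriptors (this cannot capture interpreter start-up errors such as a missing script file):

import sys

LOG_PATH = "/home/BluetoothProject/Logs/fileMonitorLogs.txt"  # log file from the question

# Unhandled exceptions are printed to sys.stderr by the default excepthook,
# so reassigning it is enough to get tracebacks into the log file.
sys.stderr = open(LOG_PATH, "a", buffering=1)   # line-buffered append

import RPi.GPIO as gpio   # an ImportError raised here now ends up in the log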

Handling stdin and stdout of subprocess [duplicate]

This question already has answers here:
Dynamic communication between main and subprocess in Python
(2 answers)
Closed 3 years ago.
I want to write a Python script that creates a subprocess, reads from its stdout, and writes to its stdin. What is written to stdin should depend on what has been read from stdout.
I've tried pretty much everything I could find about subprocess.Popen, but nothing worked out.
Basically, I want to write a script that makes the following C code print "success":
#include <stdio.h>
#include <stdlib.h>

int main()
{
    int var, inp;
    for (int i = 0; i < 100; i++) {
        var = rand();
        printf("Please type %d:\n", var); //random var. is printed
        scanf("%d", &inp); //inp == var?
        if (inp != var) {
            printf("you failed miserably\n");
            return 0;
        }
    }
    printf("success\n");
    return 0;
}
I'm failing at reading from stdout while still keeping the subprocess alive. The task seems so simple, but I can't find a simple solution.
Python code that I would expect to work:
from subprocess import *

def getNum(s): # "Please type 1234567:\t" -> "1234567"
    return "".join([t for t in s if t.isdigit()])

p = Popen("./io", stdin=PIPE, stdout=PIPE) #io is the binary obtained from above c code

for i in range(100):
    out = p.stdout.readline() #script deadlocks here
    print( out )
    inp = getNum(out)+"\n" #convert out into desired inp
    p.stdin.write(inp)

print (p.communicate()[0]) #kill p and get last output
This approach might be a little naive, but I also don't understand why it doesn't work.
The Python program gets stuck waiting to receive something from stdout. This can be due to stdout buffering. There are probably a few ways to change this behavior, but one quick way to test it is to change the C executable to force an fflush right after it prints the randomly generated number. The C code would look like this:
#include <stdio.h>
#include <stdlib.h>

int main()
{
    int var, inp;
    for (int i = 0; i < 100; i++) {
        var = rand();
        printf("Please type %d:\n", var); //random var. is printed
        // ADDED EXPLICIT stdout flush
        fflush(stdout);
        scanf("%d", &inp); //inp == var?
        if (inp != var) {
            printf("you failed miserably\n");
            return 0;
        }
    }
    printf("success\n");
    return 0;
}
With that change, the original python script is able to drive the whole interaction.
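For completeness, a sketch of what the driving script might look like under Python 3 (the original snippet appears to be Python 2, where Popen's stdin is unbuffered by default); text-mode pipes and an explicit flush after each write are assumed here:

import subprocess

def get_num(s):                        # "Please type 1234567:\n" -> "1234567"
    return "".join(ch for ch in s if ch.isdigit())

# "./io" is the binary built from the fflush()-ing C code above.
p = subprocess.Popen("./io", stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                     universal_newlines=True)    # text-mode pipes

for _ in range(100):
    prompt = p.stdout.readline()       # returns once the C side prints and flushes
    print(prompt, end="")
    p.stdin.write(get_num(prompt) + "\n")
    p.stdin.flush()                    # push the answer through Python 3's buffered pipe

print(p.communicate()[0])              # close stdin and collect the final "success"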

Run .py script in Qt

I want to run a .py script after I click a button.
I already tried the following code:
QProcess p;
QStringList params;
params << "createJSON.py";
p.start("python.exe", params);
p.waitForFinished(-1);
QString p_stdout = p.readAll();
My Python script creates a JSON file when it runs successfully, so I can tell whether the run succeeded.
I have been able to write a more detailed version of your code.
QProcess p;
QStringList params;
params << "createJSON.py";

QObject::connect(&p, &QProcess::started, []() {
    qInfo() << "Process started!";
});
QObject::connect(&p, &QProcess::errorOccurred, [&p]() {
    qWarning() << "Error occurred" << p.errorString();
});

p.start("python.exe", params);
p.waitForFinished(-1);

QString p_stdout = p.readAllStandardOutput();
QString p_stderr = p.readAllStandardError();
qDebug() << "OUT" << p_stdout;
qDebug() << "ERR" << p_stderr;
This effectively leads to an error. In my case, I get the following:
Process started!
OUT ""
ERR "python.exe: can't open file 'createJSON.py': [Errno 2] No such file or directory\n"
It may be different in your case. Either way, using the errorOccurred signal along with the errorString method will allow you to debug cases where the process actually cannot start. Reading stderr will allow you to debug cases where the process starts, but does not run as expected.
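That particular "No such file or directory" error usually means the script path is resolved against the calling process's working directory rather than the directory you expect; once the interpreter does find the script, a related issue can make the JSON file land in an unexpected place. As a quick check, a hypothetical diagnostic variant of createJSON.py (an assumption, not the asker's actual script) could report where it runs from and write its output next to the script file itself:

# createJSON.py -- hypothetical diagnostic variant, not the asker's actual script
import json
import os

script_dir = os.path.dirname(os.path.abspath(__file__))
print("working directory:", os.getcwd())   # directory the interpreter was started in
print("script directory:", script_dir)     # directory this file actually lives in

out_path = os.path.join(script_dir, "output.json")
with open(out_path, "w") as f:
    json.dump({"ok": True}, f)
print("wrote", out_path)

Passing an absolute path to the script in params, or calling p.setWorkingDirectory(...) before start(), avoids both problems.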

Writing into stdout with MFC and read stdout by python Popen

I want to write to std::cerr or std::cout from my MFC application. In a Python script I call this application, and I want to read from its stdout or stderr.
Neither works. Just using std::cout yields no output. After AllocConsole() I was at least able to print to a debug console. Unfortunately, there is still no output on the Python side.
In my MFC application I initialize a console to write to with this code:
void BindStdHandlesToConsole()
{
    // Redirect the CRT standard input, output, and error handles to the console
    freopen("CONIN$", "r", stdin);
    freopen("CONOUT$", "w", stdout);
    freopen("CONOUT$", "w", stderr);

    std::wcout.clear();
    std::cout.clear();
    std::wcerr.clear();
    std::cerr.clear();
    std::wcin.clear();
    std::cin.clear();
}

// initialization
BOOL foo::InitInstance()
{
    // allocate a console
    if (!AllocConsole())
        AfxMessageBox("Failed to create the console!", MB_ICONEXCLAMATION);
    else
        BindStdHandlesToConsole();
On the Python side, I try to print the output:
process = subprocess.Popen(args,stdout=subprocess.PIPE,shell=True)
output = process.stdout.read()
process.wait()
Is there a way to make my MFC program actually write to standard output, and the Python script read it?
The proper way of writing to the stdout pipe is the following:
HANDLE hStdOut = GetStdHandle ( STD_OUTPUT_HANDLE );
WriteFile( hStdOut, chBuf, dwRead, &dwWritten, NULL );
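On the Python side, a hedged sketch of the reading half (not part of the original answer): launch the executable directly instead of through shell=True, and read incrementally rather than waiting for read() to hit EOF. "my_mfc_app.exe" is a placeholder name:

import subprocess

# Placeholder executable name; replace with the real MFC application.
proc = subprocess.Popen(["my_mfc_app.exe"], stdout=subprocess.PIPE)

for line in proc.stdout:        # yields each line as soon as the child writes and flushes it
    print("got:", line.decode(errors="replace").rstrip())

proc.wait()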

subprocess.popen (python) in C

Like the title says, I am searching for the equivalent of Python's subprocess.Popen in C:
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, preexec_fn=os.setsid)
print p.pid
I am trying to spawn a process and then get the process ID.
`cmd` is a C program that is spawned.
In C, I believe that getpid() does the job.
But after I spawn the process, I don't know how to get the spawned process's PID from getpid().
void run_apps(pid) {
    printf("process ID is %d\n", (int) getpid());
}
That is obviously giving the current process ID.
In C, you get the maximum number of options by first calling fork() to create a new process (the eventual subprocess), then one of the exec*() family of functions to execute the subprocess. Both the original process and the new process will run concurrently, so you can exchange (read and/or write data) via pipes or socket pairs. Finally, use e.g. waitpid() in a loop to wait for the new process to exit, and "reap" its exit status. For example:
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <errno.h>

int main(void)
{
    pid_t child, p;
    int status;

    /*
     * Prepare pipes et cetera first.
     */

    /* Fork to create the subprocess. */
    child = fork();
    if (child == (pid_t)-1) {
        /* Cannot fork(); usually out of resources (user limits).
         * see errno for details. With <string.h>, you can use
         * strerror(errno) to obtain the error string itself. */
        return 1;
    } else
    if (!child) {
        /* This is the child process itself.
         * Do whatever cleanup is necessary, then
         * execute the subprocess command. */
        execlp("/bin/ls", "ls", "-lA", NULL);

        /* This is only reached if the exec failed;
         * again, see errno for reason.
         * Always have the child process exit! */
        return 127;
    }

    /* This is only run by the parent process
     * (because the child always exits within the
     * else if body above).
     *
     * The subprocess PID is 'child'.
     */

    /* Wait for the child process to exit. */
    do {
        status = 0;
        p = waitpid(child, &status, 0);
        if (p == (pid_t)-1 && errno != EINTR)
            break; /* Error */
    } while (p != child);

    if (p != child) {
        /* Child process was lost.
         * If (p == (pid_t)-1), errno describes the error.
         */
    } else
    if (WIFEXITED(status)) {
        /* Child process exited with WEXITSTATUS(status) status.
         * A status of 0 (or EXIT_SUCCESS) means success,
         * no errors occurred. Nonzero usually means an error,
         * but codes vary from binary to binary.
         */
    } else
    if (WIFSIGNALED(status)) {
        /* Child process died from WTERMSIG(status) signal.
         * If you include <string.h>, you can use
         *   strsignal(WTERMSIG(status))
         * to obtain the name (string) of the terminating signal.
         */
    } else {
        /* Child process died from unknown causes.
         */
    }

    /* All done. */
    return 0;
}
Personally, I prefer to use socketpair() to create Unix domain stream or datagram sockets between processes I control, and pipes created via pipe() if the subprocess is just some random binary to be run. In all cases you can replace the standard input (STDIN_FILENO descriptor), standard output (STDOUT_FILENO descriptor), and standard error (STDERR_FILENO descriptor) with sockets or pipes, using the dup2() function. You can even access the pseudo-files under /proc/[child]/ from the parent to observe the state of the child process if you want.
Depending on how you need to communicate with the subprocess -- input/output from/to files? strings in memory? dynamically allocated buffer for output -- there are many variants. Usually code similar to above is used when precise control and/or full-duplex (both read and write) and/or asynchronous communications are needed.
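To make that mechanism concrete for readers coming from the Python side, roughly the same fork/dup2/exec/waitpid sequence can be written with Python's os module (a sketch that assumes /bin/ls as the subprocess and only captures its stdout):

import os

r, w = os.pipe()                       # child writes into w, parent reads from r

pid = os.fork()
if pid == 0:                           # child process
    os.close(r)
    os.dup2(w, 1)                      # make fd 1 (stdout) point at the pipe's write end
    os.close(w)
    try:
        os.execvp("ls", ["ls", "-lA"])   # replaces the child image on success
    except OSError:
        os._exit(127)                  # exec failed; never fall through into parent code

os.close(w)                            # parent keeps only the read end
with os.fdopen(r) as child_out:
    print(child_out.read(), end="")

_, status = os.waitpid(pid, 0)         # reap the child, like waitpid() in the C version
if os.WIFEXITED(status):
    print("child exited with status", os.WEXITSTATUS(status))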
You can do a search for "linux" "fork" "exec" in your favourite search engine for examples of varying quality.
If you want an easier solution, and you only need to capture the output of the command (supplying no input to the command, or perhaps supplying the input from a file), you could use some variant of
#include <unistd.h>
#include <stdio.h>
#include <errno.h>

int main(void)
{
    FILE *sub;
    pid_t subpid;
    int status;

    sub = popen("setsid /bin/sh -c 'echo $$ ; exec command args' </dev/null", "rb");
    if (!sub) {
        /* popen() failed. */
        return 1;
    }

    /* Read the first line from sub. It contains the PID for the command. */
    {
        char buffer[32], *line, dummy;
        int value;

        line = fgets(buffer, sizeof buffer, sub);
        if (!line) {
            /* setsid failed, or non-POSIXy system (Windows?) */
            pclose(sub);
            return 1;
        }

        if (sscanf(line, "%d %c", &value, &dummy) != 1 || value < 2) {
            /* Internal bug, or extra output? */
            pclose(sub);
            return 1;
        }

        /* subpid is also the session ID and the process group ID,
         * because it is the session leader. */
        subpid = value;
    }

    /* Read from sub using standard I/O, to capture command output. */

    /* After no more output to read from sub, reap the subprocess. */
    errno = 0;
    do {
        status = pclose(sub);
    } while (status == -1 && errno == EINTR);

    if (status) {
        /* Problem: sub exited with nonzero exit status 'status',
         * or if status == -1, some other error occurred. */
    } else {
        /* Sub exited with success (zero exit status). */
    }

    /* Done. */
    return 0;
}
In Linux, popen() uses the /bin/sh shell (as per POSIX.1 specification), and we can use the setsid command-line utility to create the new session. Within the command, the echo $$ is a sh command which outputs the shell PID, and exec CMD... replaces the shell with the command; thus we get the PID of the command even before the command gets executed.
