I am running Python 2.7 on a Unix environment (tested on Ubuntu and OSX)
I have the following programs:
With os.open():
[SCRIPT 1]
import os
# Names of the three FIFOs used for the handshake; the server (script 1)
# and the client (script 2) must agree on these exact paths.
pipe_1_name = "pipe_1"
pipe_2_name = "pipe_2"
pipe_3_name = "pipe_3"
def set_connection():
pipe_names = [pipe_1_name, pipe_2_name, pipe_3_name]
for pipe_name in pipe_names:
if os.path.exists(pipe_name):
os.remove(pipe_name)
os.mkfifo(pipe_name)
else:
os.mkfifo(pipe_name)
pipe_1 = os.open(pipe_1_name, os.O_WRONLY)
os.write(pipe_1, "server_message_0\n")
pipe_2 = open(pipe_2_name, 'r')
received = pipe_2.readline()[:-1]
print "[0] Now processing if received is correct: " + received
pipe_3 = open(pipe_3_name, 'r')
received = pipe_3.readline()[:-1]
print "[1] Now processing if received is correct: " + received
print "Connection established."
return pipe_1,pipe_2,pipe_3
def main():
pipe_1, pipe_2, pipe_3 = set_connection()
print str(pipe_1)
print str(pipe_2)
print str(pipe_3)
if __name__ == "__main__":
main()
[SCRIPT 2]
import os
# FIFO paths shared with script 1 (the server); the server creates them,
# this client script only opens them.
pipe_1_name = "pipe_1"
pipe_2_name = "pipe_2"
pipe_3_name = "pipe_3"
def get_connection():
pipe_names = [pipe_1_name, pipe_2_name, pipe_3_name]
for pipe_name in pipe_names:
if not os.path.exists(pipe_name):
raise Exception("Pipe "+pipe_name+" does not exist!")
pipe_1 = open(pipe_1_name, 'r')
received = pipe_1.readline()[:-1]
print "[0] Now processing if received is correct: " + received
pipe_2 = os.open(pipe_2_name, os.O_WRONLY)
os.write(pipe_2, "client_message_0\n")
pipe_3 = os.open(pipe_3_name, os.O_WRONLY)
os.write(pipe_3, "client_message_1\n")
print "Connection established."
return pipe_1,pipe_2,pipe_3
def main():
pipe_1, pipe_2, pipe_3 = get_connection()
print str(pipe_1)
print str(pipe_2)
print str(pipe_3)
if __name__ == "__main__":
main()
The logic is simple:
[Pipe 1]
1. Script 1 opens a write pipe to Script 2.
2. Script 2 reads from the pipe.
[Pipe 2]
3. Script 2 opens a write pipe to Script 1.
4. Script 1 reads from the pipe.
[Pipe 3]
5. Script 2 opens a write pipe to Script 1.
6. Script 1 reads from the pipe.
Works exactly as expected.
Here is the problem. I don't want to use os.open(). I would like to receive a file object and use it to interface with the pipe. Clearly, it is not impossible, since I can read from a pipe with a file object. However, the following script does not work.
Without os.open()
[Script 1]
import os
# FIFO names for the file-object variant of the handshake; must match the
# names used by the companion client script.
pipe_1_name = "pipe_1"
pipe_2_name = "pipe_2"
pipe_3_name = "pipe_3"
def set_connection():
pipe_names = [pipe_1_name, pipe_2_name, pipe_3_name]
for pipe_name in pipe_names:
if os.path.exists(pipe_name):
os.remove(pipe_name)
os.mkfifo(pipe_name)
else:
os.mkfifo(pipe_name)
pipe_1 = open(pipe_1_name, 'w')
pipe_1.write("server_message_0\n")
pipe_2 = open(pipe_2_name, 'r')
received = pipe_2.readline()[:-1]
print "[0] Now processing if received is correct: " + received
pipe_3 = open(pipe_3_name, 'r')
received = pipe_3.readline()[:-1]
print "[1] Now processing if received is correct: " + received
print "Connection established."
return pipe_1,pipe_2,pipe_3
def main():
pipe_1, pipe_2, pipe_3 = set_connection()
print str(pipe_1)
print str(pipe_2)
print str(pipe_3)
if __name__ == "__main__":
main()
[Script 2]
import os
# FIFO paths shared with the server script; the server creates the FIFOs,
# this client only opens them.
pipe_1_name = "pipe_1"
pipe_2_name = "pipe_2"
pipe_3_name = "pipe_3"
def get_connection():
pipe_names = [pipe_1_name, pipe_2_name, pipe_3_name]
for pipe_name in pipe_names:
if not os.path.exists(pipe_name):
raise Exception("Pipe "+pipe_name+" does not exist!")
pipe_1 = open(pipe_1_name, 'r')
received = pipe_1.readline()[:-1]
print "[0] Now processing if received is correct: " + received
pipe_2 = open(pipe_2_name, 'w')
pipe_2.write("client_message_0\n")
pipe_3 = open(pipe_3_name, 'w')
pipe_3.write("client_message_1\n")
print "Connection established."
return pipe_1,pipe_2,pipe_3
def main():
pipe_1, pipe_2, pipe_3 = get_connection()
print str(pipe_1)
print str(pipe_2)
print str(pipe_3)
if __name__ == "__main__":
main()
They look the same, don't they? The only difference is how I open the fifo. Instead of os.open(pipe_name,os.O_WRONLY) I use pipe = open(pipe_name, 'w').
What happens in the second set of scripts — the ones that don't use os.open() — is that Script 1 blocks at pipe_2 = open(pipe_2_name, 'r') while Script 2 blocks at pipe_2 = open(pipe_2_name, 'w').
Why is this happening?
Sorry for the wall of text. I am really confused about this issue.
What happens in the second set of scripts, the ones that don't use
os.open(), Script 1 blocks at pipe_2 = open(pipe_2_name, 'r') while Script 2 blocks at pipe_2 = open(pipe_2_name, 'w').
No, Script 2 blocks at received = pipe_1.readline()[:-1].
Why is this happening?
It's because Script 1's open(pipe_1_name, 'w') causes the written message to be buffered in fixed-size chunks (typically 4096 or 8192 bytes), so the pipe_1.write("server_message_0\n") does not yet write anything to the pipe, but only to the buffer, and Script 2 doesn't get anything to read. See open() and also How often does python flush to a file?
To cure this, since your messages are complete lines, it suffices to use line buffering, e. g.
pipe_1 = open(pipe_1_name, 'w', 1)
(as well for the other write pipes).
Related
In python3 with Ubuntu 16.04LTS, I have a subprocess that I created from my main script to record measurements from a device connected to my local machine. I would like to know how to send a message to this subprocess when I want to finish data recording, and switch to dumping the measurements to a csv file. Shown below is a stripped-down version of what I have tried so far, but the code hangs and I am unable to dump the measurements I record. In fact, I only record 1 measurement. I am not sure about how to asynchronously check for stdin inputs while recording data. May I please get some help?
Main.py
# start subprocess
# NOTE(review): stdin is not set to subprocess.PIPE here, so communicate()
# below has no pipe through which to deliver `input=encMsg` -- and the child
# blocks in sys.stdin.read() until EOF, which looks like the reported hang.
# TODO confirm by adding stdin=subprocess.PIPE.
p_1 = subprocess.Popen(["./ekg.py", saveFilename_ekg], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# do other stuff
...
# send message to quit
message = str("1")
encMsg = message.encode()
print("Message:", encMsg.decode())
# communicate() sends encMsg (if stdin were a pipe), waits for the child to
# terminate, and returns the (stdout, stderr) pair; [0] selects stdout.
p_stdout = p_1.communicate(input=encMsg)[0]
# print "Done" from subprocess
print(p_stdout.decode('utf-8').strip())
# kill subprocess
# NOTE(review): communicate() has already waited for the child to exit, so
# kill() here is a no-op on an already-dead process.
p_1.kill()
ekg.py
def dumpLiveData(outputFile):
    """Record measurements from the EKG device until the parent asks us to
    stop (a line containing "1" on stdin), then dump them to *outputFile*
    as CSV.

    Fixes:
    - The original called int(sys.stdin.read()) inside the loop;
      sys.stdin.read() blocks until EOF, so only one measurement was ever
      recorded. select() with a zero timeout polls stdin without blocking
      (Unix only, which matches the stated Ubuntu environment).
    - On Python 3 the csv module needs a text-mode file opened with
      newline='' -- writing rows to a 'wb' handle raises TypeError.
    """
    import select
    ekg = ekgClass()
    dataMeasurements = []
    for liveData in ekg.getLiveData():
        # Non-blocking check: is there a quit message waiting on stdin?
        ready, _, _ = select.select([sys.stdin], [], [], 0)
        if ready and sys.stdin.readline().strip() == "1":
            break
        dataMeasurements.append([liveData.time, liveData.pulseWaveform])
    #print ("Dumping data")
    with open(outputFile, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, quoting=csv.QUOTE_NONNUMERIC)
        #print ("Created text file")
        writer.writerow(["Time", "Waveform value"])
        for meas in dataMeasurements:
            writer.writerow(meas)
    print("Done")
if __name__ == "__main__":
    # Command-line interface: one positional argument naming the CSV file.
    arg_parser = argparse.ArgumentParser(description="ekg.py")
    arg_parser.add_argument("outputFile", help="Output CSV file.")
    cli_args = arg_parser.parse_args()
    # record and dump measurements
    dumpLiveData(cli_args.outputFile)
Solved it by sending a control + C event to the subprocess. An try-except-else block caught the keyboard interrupt, processed it, and then I gracefully exit the block. After exiting, I write the data recorded to a csv file.
main.py
import subprocess, signal

# start subprocess
p_1 = subprocess.Popen(["./ekg.py", saveFilename_ekg], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# do other stuff
...
# Send the child a control+C (SIGINT); its KeyboardInterrupt handler stops
# the recording loop and writes the CSV file.
p_1.send_signal(signal.SIGINT)
# Fix: communicate() returns the (stdout, stderr) pair itself. The original
# indexed [0] first and then tried to unpack stdout alone into two names,
# which raises ValueError; it also passed input=encMsg, a name not defined
# in this script. No input is needed -- the SIGINT is the shutdown message.
stdout, stderr = p_1.communicate()
# print output from subprocess
print(stdout.decode('utf-8').strip())
# communicate() already waited for the child to exit, so the original
# trailing wait()/kill() pair was redundant and has been dropped.
ekg.py
def dumpLiveData(outputFile):
    """Record measurements until the parent sends SIGINT (surfacing here as
    KeyboardInterrupt), then dump everything recorded to *outputFile* as CSV.
    """
    ekg = ekgClass()
    dataMeasurements = []
    try:
        for liveData in ekg.getLiveData():
            # Fix: the original also tested an exception_found flag here,
            # but the flag was only ever set in the except handler below,
            # after the loop had already been aborted -- dead code, removed.
            dataMeasurements.append([liveData.time, liveData.pulseWaveform])
    except KeyboardInterrupt:
        # SIGINT from the parent: stop recording, fall through to the dump.
        pass
    print ("Dumping data")
    # Fix for Python 3: csv needs a text-mode file opened with newline='',
    # not 'wb' -- writerow() on a binary handle raises TypeError.
    with open(outputFile, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, quoting=csv.QUOTE_NONNUMERIC)
        print ("Created text file")
        writer.writerow(["Time", "Waveform value"])
        for meas in dataMeasurements:
            writer.writerow(meas)
    print("Done")
I'm in the process of creating a program that takes an IP address, performs an nmap scan, and takes the output and puts it in a text file. The scan works fine, but I can't seem to figure out why it's not writing anything to the text file.
Here is what I have so far
# NOTE(review): indentation was lost when this snippet was pasted; as
# written these top-level lines are a syntax error -- the statements below
# presumably belong inside the if blocks. TODO confirm against the
# original script.
if __name__ == "__main__":
import socket
import nmap
import sys
import io
from libnmap.parser import NmapParser, NmapParserException
from libnmap.process import NmapProcess
from time import sleep
from os import path
#Program Banner
# With no command-line argument, show the banner and exit.
if len(sys.argv) <= 1:
print(
"""
test
""")
sys.exit()
#Grab IP Address as argument
if len(sys.argv)==2:
ip = sys.argv[1]
# NOTE(review): the next two lines are Python 2 print statements while the
# surrounding code uses print(...) call syntax -- the script mixes both.
print "\n[+] Reading IP Address"
#Function - Pass IP to Nmap then start scanning
print "\n[+] Passing " + ip + " to Nmap..."
print("\n[+] Starting Nmap Scan\n")
def nmap_scan(ip, options):
    """Launch nmap against *ip* with *options*; return the parsed report,
    or None when the scan output could not be parsed."""
    report = None
    proc = NmapProcess(ip, options)
    exit_code = proc.run()
    if exit_code != 0:
        print("nmap scan failed: {0}".format(proc.stderr))
    try:
        report = NmapParser.parse(proc.stdout)
    except NmapParserException as e:
        print("Exception raised while parsing scan: {0}".format(e.msg))
    return report
#Function - Display Nmap scan results
def show_scan(nmap_report):
for host in nmap_report.hosts:
if len(host.hostnames):
tmp_host = host.hostnames.pop()
else:
tmp_host = host.address
print("Host is [ %s ]\n" % str.upper(host.status))
print(" PORT STATE SERVICE")
for serv in host.services:
pserv = "{0:>5s}/{1:3s} {2:12s} {3}".format(
str(serv.port),
serv.protocol,
serv.state,
serv.service)
if len(serv.banner):
pserv += " ({0})".format(serv.banner)
print(pserv)
#Function - Define output text file name & write to file
def createFile(dest):
name = "Enumerator-Results.txt"
if not(path.isfile(dest+name)):
f = open(dest+name,"a+")
f.write(show_scan(report))
f.close()
if __name__ == "__main__":
report = nmap_scan(ip, "-sV")
if report:
destination = "/root/Desktop/"
createFile(destination)
show_scan(report)
print "\nReport Complete!"
else:
print("No results returned")
You're using print statements in your show_scan() function. Instead try passing the file reference to show_scan() and replacing the print() calls with f.write() calls. This would save to file everything you're currently printing to the terminal.
Alternatively you could just change your code so that the show_scan is separate from the f.write().
ie change
f.write(show_scan(report))
to
f.write(report)
It depends on whether you want to save the raw output or what you're printing to the screen.
Also you will need to pass the reference of the report to createFile so that it has the report to print ie
createFile(destination, report)
Just make sure you are always calling f.write() with a string as its parameter.
#Function - Define output text file name & write to file
def createFile(dest, report):
    """Save *report* to Enumerator-Results.txt under *dest*, only when the
    file does not already exist.

    Fix: f.write() requires a string; the parsed report object is run
    through str() first (f.write(report) raises TypeError for non-string
    reports), and a with-block guarantees the handle is closed even when
    the write fails.
    """
    name = "Enumerator-Results.txt"
    if not path.isfile(dest + name):
        with open(dest + name, "a+") as f:
            f.write(str(report))
if __name__ == "__main__":
report = nmap_scan(ip, "-sV")
if report:
destination = "/root/Desktop/"
createFile(destination, report)
show_scan(report)
print "\nReport Complete!"
else:
print("No results returned")
I am playing with the file I/O functions, and I am having issues writing to a file.
To get a feel for it, I have either run a FOR loop on a range, adding each to a new line, or done the same for a list. Either way, I get the following appended to the file after the loop:
98
99
is dropped.
"""
global quitting
try:
raise
except SystemExit:
raise
except EOFError:
global exit_now
exit_now = True
thread.interrupt_main()
except:
erf = sys.__stderr__
print>>erf, '\n' + '-'*40
print>>erf, 'Unhandled server exception!'
print>>erf, 'Thread: %s' % threading.currentThread().getName()
print>>erf, 'Client Address: ', client_address
print>>erf, 'Request: ', repr(request)
traceback.print_exc(file=erf)
print>>erf, '\n*** Unrecoverable, server exiting!'
print>>erf, '-'*40
quitting = True
thread.interrupt_main()
class MyHandler(rpc.RPCHandler):
def handle(self):
"""Override base method"""
executive = Executive(self)
self.register("exec", executive)
self.console = self.get_remote_proxy("console")
sys.stdin = PyShell.PseudoInputFile(self.console, "stdin",
IOBinding.encoding)
sys.stdout = PyShell.PseudoOutputFile(self.console, "stdout",
IOBinding.encoding)
sys.stderr = PyShell.PseudoOutputFile(self.console, "stderr",
IOBinding.encoding)
# Keep a reference to stdin so that it won't try to exit IDLE if
# sys.stdin gets changed from within IDLE's shell. See issue17838.
self._keep_stdin = sys.stdin
self.interp = self.get_remote_proxy("interp")
rpc.RPCHandler.getresponse(self, myseq=None, wait=0.05)
def exithook(self):
"override SocketIO method - wait for MainThread to shut us down"
time.sleep(10)
<ad nauseum>
The code for creating this is:
# Fix: use a context manager so the file is flushed and closed
# deterministically. The original left the handle open and then called
# f.read() with the position at EOF (which just returns ''); the unclosed,
# unflushed handle is what made the on-disk contents look wrong.
with open('test.txt', 'w') as f:
    for x in range(100):
        f.write(str(x) + '\n')
But even if I close it and open the file itself, this stuff is appended.
How can I just write the data to the file without this extra stuff?
I'm very new with python.
I started implementing two daemon processes that will send messages to each other.
right now i have just 2 daemons that are running.
I don't understand how to build something that they can communicate through..
I read that there are pipe, or queue ...
sill, could not understand how to build a pipe or a queue that the two ends will be the two processes..
import multiprocessing
import time
import sys
def daemon():
p = multiprocessing.current_process()
print 'Starting:', p.name, p.pid
sys.stdout.flush()
while (1):
time.sleep(1)
print 'Exiting :', p.name, p.pid
sys.stdout.flush()
def machine_func():
p = multiprocessing.current_process()
print 'Starting:', p.name, p.pid
sys.stdout.flush()
while (1):
time.sleep(1)
print 'Exiting :', p.name, p.pid
sys.stdout.flush()
# Fix: the original passed target=control_service_func, a name that is not
# defined anywhere in this script, so creating the Process raised NameError
# immediately. The worker defined above is daemon(), so use that.
cs = multiprocessing.Process(name='control_service', target=daemon)
cs.daemon = True
m = multiprocessing.Process(name='machine', target=machine_func)
m.daemon = True
cs.start()
m.start()
You can find very good examples here: Communication Between Processes
you can communicate with daemons via text files like this:
from multiprocessing import Process
from ast import literal_eval as literal
from random import random
import time
def clock():
    """Return the current local time formatted HH:MM:SS (24-hour clock).

    time.ctime() looks like 'Mon Jan  1 12:34:56 2024'; characters 11-18
    are the time-of-day field.
    """
    return time.ctime()[11:19]
def sub_a(): # writes dictionary that tallys +/- every second
    """Daemon body: forever write {'a': counter} to a_test.txt, then sleep
    a random 0-2 seconds. The write is retried until the file is free.

    Fixes: the retry message named b_test.txt (copy/paste slip from
    sub_b()) even though this function writes a_test.txt, and the bare
    except is narrowed to the file errors the retry is meant to absorb.
    """
    a = 0
    while 1:
        data = {'a': a}
        opened = 0
        while not opened:
            try:
                with open('a_test.txt', 'w+') as file:
                    file.write(str(data))
                opened = 1
            except (IOError, OSError):
                # File busy (the main process has it open) -- try again.
                print ('a_test.txt in use, try WRITE again...')
                pass
        a += 1
        time.sleep(random()*2)
def sub_b(): # writes dictionary that tallys +/- every 2 seconds
    """Daemon body: forever write {'b': counter} to b_test.txt, retrying
    until the file is free, then sleep a random 0-4 seconds."""
    counter = 0
    while True:
        payload = {'b': counter}
        written = False
        while not written:
            try:
                with open('b_test.txt', 'w+') as file:
                    file.write(str(payload))
                written = True
            except:
                print ('b_test.txt in use, try WRITE again...')
                pass
        counter += 1
        time.sleep(random() * 4)
# clear communication lines
for comm_file in ('a_test.txt', 'b_test.txt'):
    with open(comm_file, 'w+') as file:
        file.write('')

# begin daemons
sa = Process(target=sub_a)
sa.daemon = True
sb = Process(target=sub_b)
sb.daemon = True
sa.start()
sb.start()

begin = time.time()
m = 0
while True:
    m += 1
    time.sleep(1)
    elapsed = int(time.time() - begin)
    # fetch data from daemons: retry each read until the file is free and
    # its contents parse back into a dict
    opened = 0
    while not opened:
        try:
            with open('a_test.txt', 'r') as f:
                a = literal(f.read())
            opened = 1
        except:
            print ('a_test.txt in use, try READ again...')
            pass
    opened = 0
    while not opened:
        try:
            with open('b_test.txt', 'r') as f:
                b = literal(f.read())
            opened = 1
        except:
            print ('READ b_test.txt in use, try READ again...')
            pass
    print(clock(), '========', elapsed, b['b'], a['a'])
In this manner you can turn an object (like a dict) into a string and write() it to a file; then use:
ast.literal_eval
to get it back out on the other side when you read()
The while not opened / try
pattern prevents a race condition, so the daemons and the main process have the time they need to avoid clashing while they open/process/close the files.
The with open(...) as file
pattern ensures each file is opened and closed reliably.
added bonus is you can open the text file in an editor to check its state in real time.
I want to tail multiple files concurrently and push the logs to scribe.
I am reading the files from a Config file then I want to tail each file and send the logs to scribe.
What I have tried sends logs only for the first file and not for the others.
I want to run the tailing concurrently for each file and send the logs for each one of them at same time.
for l in Config.items('files'):
print l[0]
print l[1]
filename = l[1]
file = open(filename,'r')
st_results = os.stat(l[1])
st_size = st_results[6]
file.seek(st_size)
while 1:
where = file.tell()
line = file.readline()
if not line:
time.sleep(1)
file.seek(where)
else:
print line, # already has newline
category=l[0]
message=line
log_entry = scribe.LogEntry(category, message)
socket = TSocket.TSocket(host='localhost', port=1463)
transport = TTransport.TFramedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(trans=transport, strictRead=False, strictWrite=False)
client = scribe.Client(iprot=protocol, oprot=protocol)
transport.open()
result = client.Log(messages=[log_entry])
transport.close()
Try something like this (Inspired by this)
import threading
def monitor_file(l):
print l[0]
print l[1]
filename = l[1]
file = open(filename,'r')
st_results = os.stat(l[1])
st_size = st_results[6]
file.seek(st_size)
while 1:
where = file.tell()
line = file.readline()
if not line:
time.sleep(1)
file.seek(where)
else:
print line, # already has newline
category=l[0]
message=line
log_entry = scribe.LogEntry(category, message)
socket = TSocket.TSocket(host='localhost', port=1463)
transport = TTransport.TFramedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(trans=transport, strictRead=False, strictWrite=False)
client = scribe.Client(iprot=protocol, oprot=protocol)
transport.open()
result = client.Log(messages=[log_entry])
transport.close()
for l in Config.items('files'):
    # Fix 1: args=(l) is just a parenthesised expression, not a tuple, so
    # the (category, filename) pair was unpacked into two positional
    # arguments and monitor_file() raised TypeError. A trailing comma
    # makes it a one-element tuple.
    thread = threading.Thread(target=monitor_file, args=(l,))
    # Fix 2: the thread was created but never started.
    thread.start()
A different implementation of #Pengman's idea:
#!/usr/bin/env python
import os
import time
from threading import Thread
def follow(filename):
    """Generator: yield lines appended to *filename*, like ``tail -f``.

    Starts at the current end of file (pre-existing content is skipped)
    and polls about once per second when no new data is available.
    Never returns.
    """
    with open(filename) as stream:
        stream.seek(0, os.SEEK_END) # goto EOF
        while True:
            line = stream.readline()
            if line:
                yield line
            else:
                time.sleep(1)
def logtail(category, filename):
print category
print filename
for line in follow(filename):
print line,
log_entry(category, line)
# One tailer thread per (category, filename) pair from the config; the
# pair unpacks directly into logtail's two parameters.
for args in Config.items('files'):
    Thread(target=logtail, args=args).start()
Where log_entry() is a copy of the code from the question:
def log_entry(category, message):
    """Open a framed-thrift connection to the local scribe server, send a
    single LogEntry, and close the connection again."""
    entry = scribe.LogEntry(category, message)
    sock = TSocket.TSocket(host='localhost', port=1463)
    transport = TTransport.TFramedTransport(sock)
    protocol = TBinaryProtocol.TBinaryProtocol(trans=transport, strictRead=False,
                                               strictWrite=False)
    client = scribe.Client(iprot=protocol, oprot=protocol)
    transport.open()
    result = client.Log(messages=[entry])
    transport.close()
follow() could be implemented using FS monitoring tools, see tail -f in python with no time.sleep.