Right now I use this to catch the output of a Python function and store it in a variable:
import io
from contextlib import redirect_stdout

def catch_output(func):
    result = io.StringIO()
    with redirect_stdout(result):
        func()
    return result.getvalue()

output = catch_output(my_func)
This works fine, but it also mutes the console until the func call has finished.
Does anybody know if I can write/pipe the live output of the func to the console and store it in a variable at the same time?
You can redirect stdout to a custom file-like object that forwards writes to multiple files:
import contextlib
import io
import sys

class TeeIO:
    def __init__(self, original, target):
        self.original = original
        self.target = target

    def write(self, b):
        self.original.write(b)
        self.target.write(b)

    def flush(self):
        # forward flushes as well, so callers that flush stdout keep working
        self.original.flush()
        self.target.flush()

@contextlib.contextmanager
def tee_stdout(target):
    tee = TeeIO(sys.stdout, target)
    with contextlib.redirect_stdout(tee):
        yield
buf = io.StringIO()
with tee_stdout(buf):
    print("foo")

print(buf.getvalue())
This is what I ended up using. I thought I'd leave this here for people who, like me, have a hard time with classes and OOP.
import sys
import io
from contextlib import redirect_stdout

def get_multi_writer(streams):
    writer = type('obj', (object,), {})
    writer.write = lambda s: [stream.write(s) for stream in streams]
    writer.flush = lambda: [stream.flush() for stream in streams]  # keep flush() calls working
    return writer

def catch_output(func, args, kwargs):
    streams = [sys.stdout, io.StringIO()]
    with redirect_stdout(get_multi_writer(streams)):
        func(*args, **kwargs)
    return streams[1].getvalue()

print(catch_output(my_func, [], {}))
As per the suggestions from the comments, I've made an example turning our function into a thread, so we can periodically check for output from that function and copy it to the real stdout.
import sys
import time
import threading
from cStringIO import StringIO

def foo(n):
    for x in range(n):
        time.sleep(1)  # intense computation
        print('test: {}'.format(x))

# I'm using Python 2.7, so I don't have contextlib.redirect_stdout
realstdout = sys.stdout
sys.stdout = StringIO()

t = threading.Thread(target=foo, args=(10,))
t.start()

lastpos = 0                                 # last cursor position in file
while True:
    t.join(.1)                              # wait .1 sec for thread to complete
    if sys.stdout.tell() != lastpos:        # data has been written to stdout
        sys.stdout.seek(lastpos)            # go back to our last position
        realstdout.write(sys.stdout.read()) # read the data to the real stdout
        lastpos = sys.stdout.tell()         # update lastpos
    if not t.is_alive():                    # when we're done
        break

sys.stdout.seek(0)                          # seek back to beginning of file
output = sys.stdout.read()                  # copy to a usable variable
sys.stdout = realstdout                     # reset stdout
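On Python 3 the same polling idea can be written with contextlib.redirect_stdout instead of swapping sys.stdout by hand. A minimal sketch, not from the original answer, and with the same caveat that the StringIO buffer is touched by two threads at once:

import io
import sys
import time
import threading
from contextlib import redirect_stdout

def foo(n):
    for x in range(n):
        time.sleep(1)  # intense computation
        print('test: {}'.format(x))

buf = io.StringIO()
t = threading.Thread(target=foo, args=(10,))
with redirect_stdout(buf):
    t.start()
    lastpos = 0
    while True:
        t.join(.1)                            # wait .1 sec for the thread
        if buf.tell() != lastpos:             # new data has been written
            buf.seek(lastpos)
            sys.__stdout__.write(buf.read())  # copy to the real console
            lastpos = buf.tell()
        if not t.is_alive():
            break

output = buf.getvalue()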
I would like to retrieve the result of the com function.
For example, I tried com('a=10') and then com('2*a').
It displays 20, but the com function does not return 20.
I also tried format(inqueue.put(x)) and got 'None'.
Maybe something with sys.stdout?
Thanks for your help.
import threading
import code
import sys

try:
    import queue
except ImportError:
    import Queue as queue

class InteractiveConsole(code.InteractiveConsole):

    def __init__(self, inqueue, exitevent):
        code.InteractiveConsole.__init__(self)
        self.inqueue = inqueue
        self.keep_prompting = True
        self.stdin = sys.stdin
        sys.stdin = self

    def readline(self):
        # Implement the sys.stdin readline method
        rl = self.inqueue.get()
        return rl

    def interact(self):
        while self.keep_prompting:
            line = self.raw_input('')
            more = self.push(line)

def run_interact(*args):
    # Start the "python interpreter" engine
    iconsole = InteractiveConsole(*args)
    iconsole.interact()

inqueue = queue.Queue()
exitevent = threading.Event()
proc = threading.Thread(target=run_interact, args=(inqueue, exitevent))
proc.start()

def com(x):
    return inqueue.put(x)
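One possible approach (my own sketch, not from the thread, continuing the code above): mirror the stdin trick on the output side by pointing sys.stdout at a queue-backed writer, then have com() drain that queue. QueueWriter and outqueue are hypothetical names:

import time

class QueueWriter:
    # Minimal file-like object that pushes everything written onto a queue.
    def __init__(self, q):
        self.q = q
    def write(self, s):
        if s:
            self.q.put(s)
    def flush(self):
        pass

outqueue = queue.Queue()
sys.stdout = QueueWriter(outqueue)  # the console thread's output lands here

def com(x):
    inqueue.put(x)
    time.sleep(0.1)  # crude: give the console thread time to evaluate the line
    parts = []
    while not outqueue.empty():
        parts.append(outqueue.get())
    return ''.join(parts).strip()

# NB: sys.stdout is now redirected for the whole process, so display results
# with sys.__stdout__.write(...) rather than print.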
I want to create a process that has 3 subprocesses: 2 to handle websockets and one to get input from the terminal to pass to the other two sockets.
import sys
import time
import select
import asyncio
import threading
import multiprocessing as mp
from multiprocessing import Queue
from multiprocessing import Process

def managment_api():
    poller = select.poll()
    poller.register(sys.stdin, select.POLLIN)
    started = True
    count = 0
    while started:
        print("management api [{:^6}]".format(count))
        while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
            _line = sys.stdin.readline()
            if _line:
                line = _line.strip().lower()
                if line == "exit" or line == "quit":
                    started = False
                    print("got exit message [{}]".format(started))
                else:
                    pass
        count += 1
        time.sleep(1)

def process_main(loop):
    print("process_main BEGIN")
    loop.run_in_executor(None, managment_api)
    print("process_main END")

if __name__ == "__main__":
    # this doesn't work
    main = Process(target=managment_api, args=())
    main.name = "management api"
    main.start()
    main.join()
    # asyncio.run(managment_api())  # need to add async to managment_api
When I make managment_api an async function and call asyncio.run(managment_api()), I can get input.
If I try to run the same code without async in a separate process, it gets stuck in the while sys.stdin in select.select(...) section of the code. I've tried with threads, but that doesn't work either.
How can I run this code from a separate process, so that it gets the terminal input in that other process?
I was able to solve the problem by first using fn = sys.stdin.fileno() to get the main process's stdin file descriptor and passing that as an argument to the subprocess, then reopening it there with sys.stdin = os.fdopen(fn):
import os
import sys
import time
import select
import asyncio
import threading
import multiprocessing as mp
from multiprocessing import Queue
from multiprocessing import Process

def managment_api(fn):
    sys.stdin = os.fdopen(fn)  # reopen the inherited stdin descriptor
    poller = select.poll()
    poller.register(sys.stdin, select.POLLIN)
    started = True
    count = 0
    while started:
        print("management api [{:^6}]".format(count))
        while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
            _line = sys.stdin.readline()
            if _line:
                line = _line.strip().lower()
                if line == "exit" or line == "quit":
                    started = False
                    print("got exit message [{}]".format(started))
                else:
                    pass
        count += 1
        time.sleep(1)

def process_main(loop):
    print("process_main BEGIN")
    loop.run_in_executor(None, managment_api)
    print("process_main END")

if __name__ == "__main__":
    fn = sys.stdin.fileno()
    main = Process(target=managment_api, args=(fn,))
    main.name = "management api"
    main.start()
    main.join()
    # asyncio.run(managment_api())  # need to add async to managment_api
I want to get the output of an exec(...) call. Here is my code:
code = """
i = [0,1,2]
for j in i :
print j
"""
result = exec(code)
How could I get the output that print produced? How can I get something like:
0
1
2
Regards and thanks.
Since Python 3.4 there is a solution in the stdlib: https://docs.python.org/3/library/contextlib.html#contextlib.redirect_stdout
from io import StringIO
from contextlib import redirect_stdout

f = StringIO()
with redirect_stdout(f):
    help(pow)
s = f.getvalue()
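Applied to the question's snippet (with Python 3 print syntax inside the code string):

from io import StringIO
from contextlib import redirect_stdout

code = """
i = [0, 1, 2]
for j in i:
    print(j)
"""

f = StringIO()
with redirect_stdout(f):
    exec(code)
print(f.getvalue())  # 0, 1 and 2 on separate lines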
In older versions you can write a context manager to handle replacing stdout:
import sys
from io import StringIO
import contextlib

@contextlib.contextmanager
def stdoutIO(stdout=None):
    old = sys.stdout
    if stdout is None:
        stdout = StringIO()
    sys.stdout = stdout
    try:
        yield stdout
    finally:
        sys.stdout = old  # restore stdout even if the code raises
code = """
i = [0,1,2]
for j in i :
print j
"""
with stdoutIO() as s:
exec(code)
print("out:", s.getvalue())
Here is a Py3-friendly version of @Jochen's answer. I also added a try-except clause to recover in case of errors in the code.
import sys
from io import StringIO
import contextlib

@contextlib.contextmanager
def stdoutIO(stdout=None):
    old = sys.stdout
    if stdout is None:
        stdout = StringIO()
    sys.stdout = stdout
    try:
        yield stdout
    finally:
        sys.stdout = old  # restore stdout even if the code raises
code = """
i = [0,1,2]
for j in i :
print(j)
"""
with stdoutIO() as s:
try:
exec(code)
except:
print("Something wrong with the code")
print("out:", s.getvalue())
You can redirect the standard output to a string for the duration of the exec call:
Python 2

import sys
from cStringIO import StringIO

code = """
i = [0, 1, 2]
for j in i:
    print(j)
"""

old_stdout = sys.stdout
redirected_output = sys.stdout = StringIO()
exec(code)
sys.stdout = old_stdout

print(redirected_output.getvalue())
Python 3

import sys
from io import StringIO

code = """
i = [0, 1, 2]
for j in i:
    print(j)
"""

old_stdout = sys.stdout
redirected_output = sys.stdout = StringIO()
exec(code)
sys.stdout = old_stdout

print(redirected_output.getvalue())
Here is a small correction to Frédéric's answer. We need to handle a possible exception in exec() to restore the normal stdout; otherwise we could not see any further print output:
code = """
i = [0,1,2]
for j in i :
print j
"""
from cStringIO import StringIO
old_stdout = sys.stdout
redirected_output = sys.stdout = StringIO()
try:
exec(code)
except:
raise
finally: # !
sys.stdout = old_stdout # !
print redirected_output.getvalue()
...
print 'Hello, World!' # now we see it in case of the exception above
Python 3: Get the output of the exec into a variable
import io, sys

print(sys.version)

# keep a named handle on the prior stdout
old_stdout = sys.stdout
# keep a named handle on an io.StringIO() buffer
new_stdout = io.StringIO()
# redirect Python stdout into the io.StringIO() buffer
sys.stdout = new_stdout

# this variable contains Python code referencing external memory
mycode = """print( local_variable + 5 )"""
local_variable = 2
exec(mycode)

# stdout from mycode is read into a variable
result = sys.stdout.getvalue().strip()

# put stdout back to normal
sys.stdout = old_stdout
print("result of mycode is: '" + str(result) + "'")
Prints:
3.4.8
result of mycode is: '7'
Also a reminder that Python's exec(...) is evil and bad because: 1. it turns your code into unreadable goto-spaghetti; 2. it introduces end-user code-injection opportunities; and 3. it throws the exception stack trace into chaos, mmkay.
Something like:
import subprocess

codeproc = subprocess.Popen(code, stdout=subprocess.PIPE)
print(codeproc.stdout.read())
should execute the code in a different process and pipe the output back to your main program via codeproc.stdout. But I've not personally used it, so if there's something I've done wrong feel free to point it out. :P
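Note that Popen(code, ...) as written would try to run the string as an external command, not as Python source. A sketch of what was probably meant, handing the string to a fresh interpreter (assumes Python 3.7+ for capture_output):

import sys
import subprocess

code = "for j in [0, 1, 2]:\n    print(j)"

# Run the code string under a separate Python interpreter and capture its stdout.
proc = subprocess.run([sys.executable, "-c", code],
                      capture_output=True, text=True)
print(proc.stdout)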
You could combine exec with eval to get an output as list:
exec('i = [0, 1, 2]')
print(eval('[j for j in i]'))
I'm using Twisted + AMP to communicate between a server and client, both of which are Python, fully under my control. My messages are usually short, but sometimes an argument can be longer than the 64K limit. Is there any way to handle this gracefully?
I see that AMPv2 handles long messages, but I think that the Twisted implementation is for AMPv1.
I suspect chunking will be part of the answer, but I'm not sure how to do that. I only have one method that is susceptible to these long messages, so I don't need the most general solution. I am open to making a different amp.Argument subclass if it will help.
You can use the code below:
# From lp:~glyph/+junk/amphacks
"""
An argument type for sending medium-sized strings (more than 64k, but small
enough that they still fit into memory and don't require streaming).
"""

from cStringIO import StringIO
from itertools import count

from twisted.protocols.amp import AMP, Argument, Command

CHUNK_MAX = 0xffff

class BigString(Argument):
    def fromBox(self, name, strings, objects, proto):
        value = StringIO()
        value.write(strings.get(name))
        for counter in count(2):
            chunk = strings.get("%s.%d" % (name, counter))
            if chunk is None:
                break
            value.write(chunk)
        objects[name] = value.getvalue()

    def toBox(self, name, strings, objects, proto):
        value = StringIO(objects[name])
        firstChunk = value.read(CHUNK_MAX)
        strings[name] = firstChunk
        counter = 2
        while True:
            nextChunk = value.read(CHUNK_MAX)
            if not nextChunk:
                break
            strings["%s.%d" % (name, counter)] = nextChunk
            counter += 1

class Send(Command):
    arguments = [('big', BigString())]

class Example(AMP):
    @Send.responder
    def gotBig(self, big):
        print 'Got a big input', len(big)
        f = file("OUTPUT", "wb")
        f.write(big)
        f.close()
        return {}

def main(argv):
    from twisted.internet import reactor
    from twisted.internet.protocol import Factory, ClientCreator
    if argv[1] == 'client':
        filename = argv[2]
        def connected(result):
            result.callRemote(Send, big=file(filename).read())
        ClientCreator(reactor, AMP).connectTCP("localhost", 4321).addCallback(
            connected)
        reactor.run()
    elif argv[1] == 'server':
        f = Factory()
        f.protocol = Example
        reactor.listenTCP(4321, f)
        reactor.run()
    else:
        print "Specify 'client' or 'server'."

if __name__ == '__main__':
    from sys import argv as arguments
    main(arguments)
PS: The code is taken from https://raw.githubusercontent.com/fusionapp/documint/8fdbaeb3aeb298afff4ba951243d03c98fe8ff99/documint/mediumbox.py
I am new to Python. I am writing a program that writes to a JSON file if a website is unreachable. The websites are stored in the hosts variable, and the check is scheduled to run every 5 seconds. I have used Pool from multiprocessing to check the websites at the same time, without delay. Afterwards, I write the data to the JSON file. But here it writes only one website's data to the file. How can I make it write the data for both at the same time?
Here's the sample code:
import os
from multiprocessing import Pool
from datetime import datetime
import time
import json

hosts = ["www.google.com", "www.smackcoders.com"]
n = len(hosts)

def write(hosts):
    u = "down"
    name = "stack.json"
    if not os.path.exists(name):
        with open(name, 'w') as f:
            f.write('{}')
    result = [(timestamp, {'monitor.status': u,
                           "monitor.id": "tcp-tcp#" + hosts
                           })]
    with open(name, 'rb+') as f:
        f.seek(-1, os.SEEK_END)
        f.truncate()
        for entry in result:
            _entry = '"{}":{},\n'.format(entry[0], json.dumps(entry[1]))
            _entry = _entry.encode()
            f.write(_entry)
        f.write('}'.encode('ascii'))

def main(hosts):
    p = Pool(processes=n)
    result = p.map(write, hosts)

while True:
    timestamp = datetime.now().strftime("%B %d %Y, %H:%M:%S")
    main(hosts)
    time.sleep(5)
My output:
""March 13 2019, 10:49:03":{"monitor.id": "tcp-tcp#www.smackcoders.com", "monitor.status": "down"},
}
Required Output:
{"March 13 2019, 10:49:03":{"monitor.id": "tcp-tcp#www.smackcoders.com", "monitor.status": "down"},"March 13 2019, 10:49:03":{"monitor.id": "tcp-tcp#www.google.com", "monitor.status": "down"},
}
I've made some minor changes to your code and implemented a Lock.
import os
from multiprocessing import Pool, RLock
from datetime import datetime
import time
import json

file_lock = RLock()
hosts = ["www.google.com", "www.smackcoders.com"]
n = len(hosts)

def write(hosts):
    u = "down"
    name = "stack.json"
    if not os.path.exists(name):
        with open(name, 'w') as f:
            f.write('{}')
    result = [(timestamp, {'monitor.status': u,
                           "monitor.id": "tcp-tcp#" + hosts
                           })]
    with file_lock:
        with open(name, 'rb+') as f:
            f.seek(-1, os.SEEK_END)
            f.truncate()
            for entry in result:
                _entry = '"{}":{},\n'.format(entry[0], json.dumps(entry[1]))
                _entry = _entry.encode()
                f.write(_entry)
            f.write('}'.encode('ascii'))

def main(hosts):
    p = Pool(processes=n)
    result = p.map(write, hosts)

while True:
    timestamp = datetime.now().strftime("%B %d %Y, %H:%M:%S")
    main(hosts)
    time.sleep(5)
However, for a long-running process, constantly reading and rewriting a file for logging seems like a poor implementation, as the code has to read a bulky file and completely rewrite it on every pass. Consider writing the log to a database instead.
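For example, a minimal sqlite3 sketch (the table name and schema are my own choices, not from the original answer; each worker process should open its own connection):

import sqlite3
from datetime import datetime

conn = sqlite3.connect("monitor.db")
conn.execute("""CREATE TABLE IF NOT EXISTS status_log
                (ts TEXT, monitor_id TEXT, status TEXT)""")

def log_status(host, status):
    ts = datetime.now().strftime("%B %d %Y, %H:%M:%S")
    with conn:  # commits automatically; sqlite serializes concurrent writers
        conn.execute("INSERT INTO status_log VALUES (?, ?, ?)",
                     (ts, "tcp-tcp#" + host, status))

log_status("www.google.com", "down")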
Here's a different option that uses Thread instead of Pool. I created a class to get the return value of join():
from threading import Thread

# Class that overrides Thread to get the return value from join()
class ThreadWithReturnValue(Thread):
    def __init__(self, group=None, target=None, name=None, args=None, kwargs=None, Verbose=None):
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}
        super().__init__(group, target, name, args, kwargs)
        self._return = None

    def run(self):
        print(type(self._target))
        if self._target is not None:
            self._return = self._target(*self._args, **self._kwargs)

    def join(self, *args):
        Thread.join(self, *args)
        return self._return
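A quick usage sketch of the class above (slow_double is a made-up worker):

def slow_double(x):
    return 2 * x

t = ThreadWithReturnValue(target=slow_double, args=(21,))
t.start()
print(t.join())  # prints 42 once the thread finishes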
I have changed the code to get the status of each host first, then write the results to your file. I also fixed the way the JSON file is written.
import os
from datetime import datetime
import time
import json
from threading import Thread

hosts = ["www.google.com", "www.smackcoders.com"]
filepath = os.path.join(os.getcwd(), "stack.json")
n = len(hosts)

def perform_ping(host_ip):
    """
    You had hardcoded "down"; this method pings to check whether we get an
    ICMP response.
    """
    response = os.system("ping -c 1 " + host_ip)
    if response == 0:
        return 'UP'
    else:
        return 'DOWN'

def write_result(timestamp, results):
    # u = "down"  -- using perform_ping to get the status instead
    if not os.path.exists(filepath):
        current_file = {}
    else:
        # If the file exists, read the current output
        with open(filepath, 'r') as f_read:
            current_file = json.loads(f_read.read())
    inner_result = []
    for result in results:
        host, status = result
        inner_result.append({'monitor.status': status,
                             "monitor.id": "tcp-tcp#" + host
                             })
    current_file[timestamp] = inner_result
    # write the file with the new input
    with open(filepath, 'w') as f_write:
        f_write.write(json.dumps(current_file))

def main():
    while True:
        thread_list = []
        for host_ip in hosts:
            thread_list.append(ThreadWithReturnValue(target=perform_ping, name=host_ip, args=(host_ip, )))
        results = []
        timestamp = datetime.now().strftime("%B %d %Y, %H:%M:%S")
        for thread in thread_list:
            thread.start()
        for thread in thread_list:
            results.append((thread.name, thread.join()))
        # Pings run in parallel; write the result at the end to avoid thread
        # collisions and to avoid reading/writing the file too many times if
        # you increase the number of hosts
        write_result(timestamp, results)
        time.sleep(5)

if __name__ == '__main__':
    main()