Modify function's code in wrapper function - python

Is it possible to replace the actual code of a function from the function that wraps it? Here I'm trying to replace print statements with logging statements:
import logging
import re

def print_to_log(func):
    def wrapper_print_to_log(*args, **kwargs):
        # how to do something like this?
        re.sub(r'print\s*\(', 'logging.info(', **function_code**)
        return func(*args, **kwargs)
    return wrapper_print_to_log

@print_to_log
def greet(name='bob'):
    print("Hello, %s" % name)
    print("How are you sir?")
Is it possible to replace the code or do something similar to the above?

Perhaps you can temporarily replace sys.stdout?
import logging
import re
import sys
import io

def print_to_log(func):
    def wrapper_print_to_log(*args, **kwargs):
        stdout = sys.stdout
        b = io.StringIO()
        sys.stdout = b
        try:
            return func(*args, **kwargs)
        finally:
            sys.stdout = stdout
            print(b.getvalue().upper())
    return wrapper_print_to_log

@print_to_log
def greet(name='bob'):
    print("Hello, %s" % name)
    print("How are you sir?")
greet('justin')
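If the goal is to turn the printed output into log records rather than upper-casing it, the same capture idea can route every printed line to logging. Here is a minimal sketch of that variant (the LoggingWriter class and wrapper name are my own illustrative choices, not from the question); it uses contextlib.redirect_stdout so the original stdout is restored automatically:

import contextlib
import io
import logging

logging.basicConfig(level=logging.INFO)

class LoggingWriter(io.TextIOBase):
    """File-like object that forwards every non-empty line to logging.info."""
    def write(self, text):
        stripped = text.strip()
        if stripped:
            logging.info(stripped)
        return len(text)

def print_to_log(func):
    def wrapper(*args, **kwargs):
        # Anything the wrapped function prints goes to the logger instead.
        with contextlib.redirect_stdout(LoggingWriter()):
            return func(*args, **kwargs)
    return wrapper

@print_to_log
def greet(name='bob'):
    print("Hello, %s" % name)

greet('justin')  # emits: INFO:root:Hello, justin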

Related

How to write something into input() built-in function from code directly

I need to fill stdin from code directly while input() is waiting for input.
Is there a way to do something like the following:
# Here suppose to be some code that will automatically fill input() below
string = input("Input something: ")
# Or here
I've heard about subprocess.Popen, but I don't understand how to use it in my case. Thank you.
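(As an aside on the subprocess.Popen idea: it only applies when the code calling input() runs as a separate process; in that case you can feed its stdin directly. A rough sketch, where child.py is a hypothetical script that calls input():)

import subprocess

# Start a hypothetical child.py that calls input(), and feed its stdin.
proc = subprocess.Popen(
    ['python', 'child.py'],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    text=True,
)
out, _ = proc.communicate(input='Vasja\n')  # this is what input() receives
print(out)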
This code does something like that:
import sys
from io import StringIO

class File(StringIO):
    def __init__(self):
        self._origin_out = sys.stdout
        self._origin_in = sys.stdin
        sys.stdout = self
        sys.stdin = self
        self._in_data = ''
        super(File, self).__init__()

    def write(self, data):
        if data == 'My name is:':
            self._in_data = 'Vasja\n'
        else:
            self._origin_out.write(data)

    def readline(self, *args, **kwargs):
        res = self._in_data
        if res:
            self._in_data = ''
            return res
        else:
            # fall back to the real stdin (sys.stdin is now this object)
            return self._origin_in.readline(*args, **kwargs)

    def __del__(self):
        sys.stdout = self._origin_out
        sys.stdin = self._origin_in

global_out_file = File()
a = input('My name is:')
print('Entered name is:', a)
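For tests, a lighter alternative (my suggestion, not what the answer above does) is to patch the built-in input() so it returns a canned value instead of blocking on stdin:

from unittest import mock

def ask_name():
    return input('My name is:')

# input() now returns 'Vasja' without touching stdin at all.
with mock.patch('builtins.input', return_value='Vasja'):
    print('Entered name is:', ask_name())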

Python Decorator Log file not generating

I am new to Python and I'm learning logging with decorators.
The code below is not generating the required log file for me. When I debug the code, the correct message reaches the logging statement, but the file is not created. From the test method I call the required function where I have implemented the decorator. Please point out where I am making a mistake.
try:
    import csv
    import requests
    import datetime
    import os
    import sys
    import logging
except Exception as e:
    print("Some Modules are missing {}".format(e))

class Meta(type):
    """ Meta class"""
    def __call__(cls, *args, **kwargs):
        instance = super(Meta, cls).__call__(*args, **kwargs)
        return instance

    def __init__(cls, name, base, attr):
        super(Meta, cls).__init__(name, base, attr)

class log(object):
    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kwargs):
        """ Wrapper Function"""
        start = datetime.datetime.now()     # start time
        Tem = self.func(*args)              # call Function
        Argument = args
        FunName = self.func.__name__        # get Function name
        end = datetime.datetime.now()       # end time

        message = """
        Function        : {}
        Execution Time  : {}
        Argument        : {}
        Memory          : {} Bytes
        Date            : {}
        """.format(FunName,
                   end - start,
                   Argument,
                   sys.getsizeof(self.func),
                   start)

        cwd = os.getcwd()
        folder = 'Logs'
        newPath = os.path.join(cwd, folder)

        try:
            """Try to create a folder """
            os.mkdir(newPath)
        except:
            """Folder already exists """
            logging.basicConfig(filename='apiRun.log'.format(newPath), level=logging.DEBUG)
            logging.debug(message)
        return Tem

class APIHelper(metaclass=Meta):
    def __init__(self, *args, **kwargs):
        pass

    @log
    def star_wars_characters(url):
        # self.url = url
        api_response = requests.get(url)
        people = []
        if api_response.status_code == 200:
            data = api_response.json()
            for d in data['results']:
                character = []
                character.append(d['name'])
                character.append(d['height'])
                character.append(d['gender'])
                people.append(character)
            return people
        else:
            return "Bad Request"
My Test Method:
import unittest
import csv
from com.Script.APIHelper import APIHelper

class TestAPI(unittest.TestCase):
    def _setUp(self, file_name):
        self.api = APIHelper()
        with open(file_name, "w") as self.fd:
            self.csvfile = csv.writer(self.fd, delimiter=',')
            self.csvfile.writerow(['Name', 'Height', 'Gender'])

    def tearDown(self):
        self.fd.close()

    def test_responseNotEmpty(self):
        file_name = 'SWAPI.csv'
        self._setUp(file_name)
        people = self.api.star_wars_characters("https://swapi.dev/api/people/")
        assert type(people) is list
Thank you in advance.
Add a finally clause, and change filename='apiRun.log' to filename='{}/apiRun.log':
try:
    """Try to create a folder """
    os.mkdir(newPath)
except:
    """Folder already exists """
finally:
    logging.basicConfig(filename='{}/apiRun.log'.format(newPath), level=logging.DEBUG)
    logging.debug(message)
except is executed only when an exception is raised in the try block.
finally is always executed.
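An alternative worth mentioning (my suggestion, not part of the original answer) is to skip the try/except entirely: os.makedirs can tolerate an existing folder, and os.path.join avoids hand-building the log path:

import os
import logging

new_path = os.path.join(os.getcwd(), 'Logs')
os.makedirs(new_path, exist_ok=True)  # no error if the folder already exists

logging.basicConfig(filename=os.path.join(new_path, 'apiRun.log'),
                    level=logging.DEBUG)
logging.debug('decorator message goes here')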

Remove stacktrace from python gevent output

I'm new to Python and trying to remove/trim the gevent stacktrace output when an exception is raised. I read somewhere that I can make this happen by using AsyncResult, however I can't figure out how to use it.
Here is an example I started with and iterated over to make it similar to the real code I'm troubleshooting, but I got stuck in the last phase when I tried to add my_decor to work().
Any help fixing this is much appreciated.
from gevent.event import AsyncResult
import gevent
from functools import wraps

def my_decor(k, *args, **kwargs):
    @wraps(k)
    def wrapper(*args, **kwargs):
        r = AsyncResult()
        try:
            value = k()
        except Exception as e:
            r.set_exception(e)
        else:
            r.set(value)
        return r.exception or r.value

    result = gevent.spawn(wrapper, k)
    return result

def f():
    def foo():
        if True:
            raise Exception('tttttttt')
    return foo

def p():
    def bar():
        if True:
            raise Exception('ppppppppppppp')
    return bar

@my_decor
def work():
    foo1 = gevent.spawn(f())
    bar1 = gevent.spawn(p())
    gevent.joinall([foo1, bar1])
    return foo1.get() or bar1.get()
Found the answer; figured it might help those with the same problem.
from gevent.event import AsyncResult
import gevent
from functools import wraps

def my_decor(k):
    @wraps(k)
    def wrapper(*args, **kwargs):
        r = AsyncResult()
        try:
            value = k(*args, **kwargs)
        except Exception as e:
            r.set_exception(e)
        else:
            r.set(value)
        return r.exception or r.value
    return wrapper

def f(msg):
    @my_decor
    def foo():
        if True:
            raise Exception('tttttttt %s' % msg)
        # print('test')
    return foo

def p(msg):
    @my_decor
    def bar():
        if True:
            raise Exception('ppppppppppppp %s', msg)
    return bar

def work():
    test = "test"
    seti = "set"
    foo1 = gevent.spawn(f(test))  # returns a function that the coroutine uses
    bar1 = gevent.spawn(p(seti))
    gevent.joinall([foo1, bar1])
    return foo1.get() or bar1.get()

res = work()
print(res)

How to automatically wrap functions from a certain file

It's a well-known fact that there are many ways to get a function name using the Python standard library; here's a little example:
import sys
import dis
import traceback

def get_name():
    stack = traceback.extract_stack()
    filename, codeline, funcName, text = stack[-2]
    return funcName

def foo1():
    print("Foo0 start")
    print("Inside-_getframe {0}".format(sys._getframe().f_code.co_name))
    print("Inside-traceback {0}".format(get_name()))
    print("Foo1 end")

def foo2():
    print("Foo2 start")
    print("Inside {0}".format(sys._getframe().f_code.co_name))
    print("Inside-traceback {0}".format(get_name()))
    print("Foo2 end")

def foo3():
    print("Foo3 start")
    print("Inside {0}".format(sys._getframe().f_code.co_name))
    print("Inside-traceback {0}".format(get_name()))
    print("Foo3 end")

for f in [foo1, foo2, foo3]:
    print("Outside: {0}".format(f.__name__))
    f()
    print('-' * 80)
You can use traceback, sys._getframe, dis, and probably many more options... so far so good, Python is awesome for this kind of introspection.
Now, here's the thing: I'd like to know how to automatically wrap functions (at file level) so that they print their name and measure their execution time when executed. For instance, something like this:
def foo1():
    print("Foo0 processing")

def foo2():
    print("Foo2 processing")

def foo3():
    print("Foo3 processing")

wrap_function_from_this_file()

for f in [foo1, foo2, foo3]:
    f()
    print('-' * 80)
Would print something like:
foo1 started
Foo1 processing
foo1 finished, elapsed time=1ms
--------------------------------------------------------------------------------
foo2 started
Foo2 processing
foo2 finished, elapsed time=2ms
--------------------------------------------------------------------------------
foo3 started
Foo3 processing
foo3 finished, elapsed time=3ms
--------------------------------------------------------------------------------
As you can see, the idea is to avoid adding a wrapper manually to each of the file's functions. wrap_function_from_this_file would automagically introspect the file where it is executed and modify the functions, wrapping them somehow; in this case, with some code printing their name and execution time.
Just for the record, I'm not asking for a profiler. I'd like to know whether this is possible to do and how.
A solution could be to use globals() to get information about the currently defined objects. Here is a simple wrapper function, which replaces the functions within the given globals data with a wrapped version of them:
import types

def my_tiny_wrapper(glb):
    def wrp(f):
        # create a function which is not in
        # local space of my_tiny_wrapper
        def _inner(*args, **kwargs):
            print('wrapped', f.__name__)
            res = f(*args, **kwargs)
            print('end wrap', f.__name__)
            return res
        return _inner

    for f in [f for f in glb.values() if type(f) == types.FunctionType
              and f.__name__ != 'my_tiny_wrapper']:
        print('WRAP FUNCTION', f.__name__)
        glb[f.__name__] = wrp(f)
It can be used like this:
def peter(): pass
def pan(a): print('salat and onions')
def g(a, b, c='A'): print(a, b, c)

# pass the current globals to the function
my_tiny_wrapper(globals())

g(4, b=2, c='D')  # test keyword arguments
peter()           # test no arguments
pan(4)            # single argument
generating the following result:
~ % python derp.py
('WRAP FUNCTION', 'g')
('WRAP FUNCTION', 'pan')
('WRAP FUNCTION', 'peter')
('wrapped', 'g')
(4, 2, 'D')
('end wrap', 'g')
('wrapped', 'peter')
('end wrap', 'peter')
('wrapped', 'pan')
salat and onions
('end wrap', 'pan')
Here's the solution I was looking for:
import inspect
import time
import random
import sys

random.seed(1)

def foo1():
    print("Foo0 processing")

def foo2():
    print("Foo2 processing")

def foo3():
    print("Foo3 processing")

def wrap_functions_from_this_file():
    def wrapper(f):
        def _inner(*args, **kwargs):
            start = time.time()
            print("{0} started".format(f.__name__))
            result = f(*args, **kwargs)
            time.sleep(random.random())
            end = time.time()
            print('{0} finished, elapsed time= {1:.4f}s'.format(
                f.__name__, end - start))
            return result
        return _inner

    for o in inspect.getmembers(sys.modules[__name__], inspect.isfunction):
        globals()[o[0]] = wrapper(o[1])

wrap_functions_from_this_file()

for f in [foo1, foo2, foo3]:
    f()
    print('-' * 80)
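A small refinement (my suggestion, not part of either answer): using functools.wraps and time.perf_counter keeps the wrapped functions' names intact for further introspection and gives a more precise timer. This could replace the inner wrapper above:

import functools
import time

def timed(f):
    @functools.wraps(f)  # preserves __name__/__doc__ of the wrapped function
    def _inner(*args, **kwargs):
        start = time.perf_counter()
        print("{0} started".format(f.__name__))
        result = f(*args, **kwargs)
        elapsed = time.perf_counter() - start
        print("{0} finished, elapsed time={1:.4f}s".format(f.__name__, elapsed))
        return result
    return _inner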

Running "unique" tasks with celery

I use celery to update RSS feeds in my news aggregation site. I use one @task for each feed, and things seem to work nicely.
There's a detail that I'm not sure I handle well though: all feeds are updated once every minute with a @periodic_task, but what if a feed is still updating from the last periodic task when a new one is started? (for example if the feed is really slow, or offline, and the task is held in a retry loop)
Currently I store tasks results and check their status like this:
import socket
from datetime import timedelta
from celery.decorators import task, periodic_task
from aggregator.models import Feed

_results = {}

@periodic_task(run_every=timedelta(minutes=1))
def fetch_articles():
    for feed in Feed.objects.all():
        if feed.pk in _results:
            if not _results[feed.pk].ready():
                # The task is not finished yet
                continue
        _results[feed.pk] = update_feed.delay(feed)

@task()
def update_feed(feed):
    try:
        feed.fetch_articles()
    except socket.error, exc:
        update_feed.retry(args=[feed], exc=exc)
Maybe there is a more sophisticated/robust way of achieving the same result using some celery mechanism that I missed ?
Based on MattH's answer, you could use a decorator like this:
from django.core.cache import cache
import functools

def single_instance_task(timeout):
    def task_exc(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            lock_id = "celery-single-instance-" + func.__name__
            acquire_lock = lambda: cache.add(lock_id, "true", timeout)
            release_lock = lambda: cache.delete(lock_id)
            if acquire_lock():
                try:
                    func(*args, **kwargs)
                finally:
                    release_lock()
        return wrapper
    return task_exc
then, use it like so...
@periodic_task(run_every=timedelta(minutes=1))
@single_instance_task(60*10)
def fetch_articles():
    yada yada...
From the official documentation: Ensuring a task is only executed one at a time.
Using https://pypi.python.org/pypi/celery_once seems to do the job really nicely, including reporting errors and testing against some parameters for uniqueness.
You can do things like:
from celery_once import QueueOnce
from myapp.celery import app
from time import sleep

@app.task(base=QueueOnce, once=dict(keys=('customer_id',)))
def start_billing(customer_id, year, month):
    sleep(30)
    return "Done!"
which just needs the following settings in your project:
ONCE_REDIS_URL = 'redis://localhost:6379/0'
ONCE_DEFAULT_TIMEOUT = 60 * 60 # remove lock after 1 hour in case it was stale
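If I recall celery_once's behaviour correctly, queuing the same task again while the first is still held by the lock raises its AlreadyQueued exception, which you can catch; a sketch, with illustrative arguments:

from celery_once import AlreadyQueued

try:
    start_billing.delay(customer_id=42, year=2024, month=1)
except AlreadyQueued:
    # a start_billing task for this customer_id is already queued or running
    pass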
If you're looking for an example that doesn't use Django, then try this example (caveat: uses Redis instead, which I was already using).
The decorator code is as follows (full credit to the author of the article, go read it):
import redis

REDIS_CLIENT = redis.Redis()

def only_one(function=None, key="", timeout=None):
    """Enforce only one celery task at a time."""

    def _dec(run_func):
        """Decorator."""

        def _caller(*args, **kwargs):
            """Caller."""
            ret_value = None
            have_lock = False
            lock = REDIS_CLIENT.lock(key, timeout=timeout)
            try:
                have_lock = lock.acquire(blocking=False)
                if have_lock:
                    ret_value = run_func(*args, **kwargs)
            finally:
                if have_lock:
                    lock.release()

            return ret_value

        return _caller

    return _dec(function) if function is not None else _dec
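Applying it to a task would look roughly like this (the task name, lock key, and timeout below are illustrative, not from the article):

from celery import shared_task

@shared_task
@only_one(key="update_feeds_lock", timeout=60 * 5)
def update_feeds():
    # only one worker at a time gets past the Redis lock for this key
    ...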
I was wondering why nobody mentioned using celery.app.control.inspect().active() to get the list of currently running tasks. Is it not real time? Because otherwise it would be very easy to implement; for instance:
from functools import wraps

def unique_task(callback, *decorator_args, **decorator_kwargs):
    """
    Decorator to ensure only one instance of the task is running at once.
    """
    @wraps(callback)
    def _wrapper(celery_task, *args, **kwargs):
        active_queues = celery_task.app.control.inspect().active()
        if active_queues:
            for queue in active_queues:
                for running_task in active_queues[queue]:
                    # Discard the currently running task from the list.
                    if celery_task.name == running_task['name'] and celery_task.request.id != running_task['id']:
                        return f'Task "{callback.__name__}()" cancelled! already running...'
        return callback(celery_task, *args, **kwargs)
    return _wrapper
And then just applying the decorator to the corresponding tasks:
@celery.task(bind=True)
@unique_task
def my_task(self):
    # task executed once at a time.
    pass
This solution is for celery running on a single host with concurrency greater than 1. Other kinds of locks without dependencies like redis, apart from file-based ones, don't work with concurrency greater than 1.
# imports assumed by this snippet (written in Python 2 / old-style celery PeriodicTask)
from datetime import datetime, timedelta
from fcntl import flock, LOCK_EX, LOCK_NB
from hashlib import md5
from os.path import join
from time import sleep

from celery.task import PeriodicTask


class Lock(object):
    def __init__(self, filename):
        self.f = open(filename, 'w')

    def __enter__(self):
        try:
            flock(self.f.fileno(), LOCK_EX | LOCK_NB)
            return True
        except IOError:
            pass
        return False

    def __exit__(self, *args):
        self.f.close()


class SinglePeriodicTask(PeriodicTask):
    abstract = True
    run_every = timedelta(seconds=1)

    def __call__(self, *args, **kwargs):
        lock_filename = join('/tmp',
                             md5(self.name).hexdigest())
        with Lock(lock_filename) as is_locked:
            if is_locked:
                super(SinglePeriodicTask, self).__call__(*args, **kwargs)
            else:
                print 'already working'


class SearchTask(SinglePeriodicTask):
    restart_delay = timedelta(seconds=60)

    def run(self, *args, **kwargs):
        print self.name, 'start', datetime.now()
        sleep(5)
        print self.name, 'end', datetime.now()
