Python: time instance inside class not updating on call

I have a small script that polls a database to check the status of certain jobs. I decided to use APScheduler to handle the looping call, and I created a decorator to time out a function if it takes too long. The issue I am having is that the decorated methods live inside a class, and even though I create two instances of the class, inside two different functions, both instances always have the same start_time. I thought that if I moved the decorator inside my class and initialized start_time in the __init__ call, it would update start_time per instance of the class. But when I moved the decorator inside the class and assigned self.start_time = datetime.now(), the start time updates on each call of the class and thus will never time out. The example of the decorator inside the class is also below.
from datetime import datetime, timedelta
from functools import wraps

def timeout(start, min_to_wait):
    def decorator(func):
        def _handle_timeout():
            scheduler.shutdown(wait=False)

        @wraps(func)
        def wrapper(*args, **kwargs):
            expire = start + timedelta(minutes=min_to_wait)
            now = datetime.now()
            if now > expire:
                _handle_timeout()
            return func(*args, **kwargs)
        return wrapper
    return decorator
class Job(object):
    def __init__(self, name, run_id, results):
        self.name = name
        self.run_id = run_id
        self.results = results
        self.pack_id = None
        self.status = None

    start_time = datetime.now()

    @timeout(start_time, config.WAIT_TIME)
    def wait_for_results(self):
        if self.results:
            self.pack_id = self.results[0].get('parcel_id')
            self.status = self.results[0].get('status')
            return self.results[0]
        else:
            return False

    @timeout(start_time, config.WORK_TIME)
    def is_done(self):
        status = self.results[0].get('status')
        status_map = {'done': True,
                      'failed': FailedError,
                      'lost': LostError}

        def _get_or_throw(s, map_obj):
            value = map_obj.get(s)
            if s in ['failed', 'lost']:
                raise value(s)
            else:
                self.status = s
                return s
        return _get_or_throw(status, status_map)
def job_one(mssql, postgres, runid):
    res = get_results(mssql, config.MSSQL, first_query_to_call)
    first_job = Job('first_job', runid, res)
    step_two = first_job.wait_for_results()
    if step_two:
        try:
            logger.info(first_job)
            if first_job.is_done() == 'done':
                scheduler.remove_job('first_job')
                scheduler.add_job(lambda: job_two(mssql, postgres, first_job.pack_id, runid),
                                  'interval', seconds=config.POLL_RATE, id='second_job')
        except LostError as e:
            logger.error(e, exc_info=True)
            scheduler.shutdown(wait=False)
        except FailedError as e:
            logger.error(e, exc_info=True)
            scheduler.shutdown(wait=False)
def job_two(mssql, postgres, object_id, runid):
    res = get_results(mssql, config.MSSQL, some_other_query_to_run, object_id)
    second_job = Job('second_job', runid, res)
    step_two = second_job.wait_for_results()
    if step_two:
        try:
            logger.info(second_job)
            if second_job.is_done() == 'done':
                scheduler.remove_job('second_job')
        except LostError as e:
            logger.error(e, exc_info=True)
            scheduler.shutdown(wait=False)
        except FailedError as e:
            logger.error(e, exc_info=True)
            scheduler.shutdown(wait=False)
if __name__ == '__main__':
    runid = sys.argv[1:]
    if runid:
        runid = runid[0]
        scheduler = BlockingScheduler()
        run_job = scheduler.add_job(lambda: job_one(pymssql, psycopg2, runid),
                                    'interval', seconds=config.POLL_RATE, id='first_job')
        scheduler.start()
Here is my attempt to move the decorator inside the class:
class Job(object):
    def __init__(self, name, run_id, results):
        self.name = name
        self.run_id = run_id
        self.results = results
        self.pack_id = None
        self.status = None
        self.start_time = datetime.now()

    def timeout(min_to_wait):
        def decorator(func):
            def _handle_timeout():
                scheduler.shutdown(wait=False)

            @wraps(func)
            def wrapper(self, *args, **kwargs):
                print '**'
                print 'self start time:', self.start_time
                expire = self.start_time + timedelta(minutes=min_to_wait)
                now = datetime.now()
                if now > expire:
                    _handle_timeout()
                return func(self, *args, **kwargs)
            return wrapper
        return decorator
Here is example output from when I use the above decorator:
**
self start time: 2014-10-28 08:57:11.947026
**
self start time: 2014-10-28 08:57:16.976828
**
self start time: 2014-10-28 08:57:21.989064
The start_time needs to stay the same, or else I can never time out the function.

In the first example, your start time is initialized when the class statement is executed, which in your case is when the module is first imported into the interpreter.
In the second example, the start time is initialized when the class is instantiated. It should not change from one method call to another for the same Job instance. Of course, if you keep creating new instances, the start time will be different for each instance.
You didn't post the code that uses your Job class, so it's hard to tell what the right solution would be.
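To make the difference concrete, here is a minimal, self-contained sketch (my own illustration, separate from the scheduler code above):

from datetime import datetime
import time

class ClassLevel(object):
    # evaluated exactly once, when the class body runs (at import time)
    start_time = datetime.now()

class InstanceLevel(object):
    def __init__(self):
        # evaluated again for every new instance
        self.start_time = datetime.now()

a = ClassLevel()
time.sleep(1)
b = ClassLevel()
print(a.start_time == b.start_time)   # True: one value shared by all instances

c = InstanceLevel()
time.sleep(1)
d = InstanceLevel()
print(c.start_time == d.start_time)   # False: each instance gets its own value

For the timeout decorator, that means the deadline should be captured once per job, not once per import and not once per method call.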

Related

How to load test a gRPC server with Locust

I have a simple gRPC server with two services, signin and ping, encapsulated in the following class, which also has a private method to authenticate requests:
class Listener(pingpong_pb2_grpc.PingPongServiceServicer):
    def __init__(self):
        self.counter = counter_g
        self.last_print_time = time.time()

    def __str__(self):
        return self.__class__.__name__

    def auth_request(self, request, context):
        metadata_dict = dict(context.invocation_metadata())
        if metadata_dict.get("authorization").split(" ")[1] == "jf90845h5gfip345t8":
            pass
        else:
            print("Auth Failed")
            context.abort(grpc.StatusCode.UNAUTHENTICATED, "Auth Failed")

    def signin(self, request, context):
        """The signin function is the rpc call that is called by the client"""
        if request.username == "test" and request.password == "test":
            print('Signin Success')
            return pingpong_pb2.SignInResponse(token="jf90845h5gfip345t8", success=True)
        else:
            print('Signin Failed')
            return pingpong_pb2.SignInResponse(token="bad token", success=False)

    def ping(self, request, context):
        """The ping function is the rpc call that is called by the client"""
        self.auth_request(request, context)
        self.counter += 1
        if self.counter > 1000:
            print("1000 calls in %3f seconds" % (time.time() - self.last_print_time))
            self.last_print_time = time.time()
            self.counter = 0
        response = pingpong_pb2.Pong(count=request.count + 1)
        return response
In order to make the gRPC tasks report execution time and success/failure events back to Locust, I wrote this decorator:
def grpctask(func):
    def wrapper(*args, **kwargs):
        # get task's function name
        task_name = func.__name__
        start = time.time()
        result = None
        try:
            result = func(*args, **kwargs)
        except grpc.RpcError as e:
            total = int((time.time() - start) * 1000)
            events.request_failure.fire(request_type="grpc",
                                        name=task_name,
                                        response_time=total,
                                        response_length=0,
                                        exception=e)
        else:
            total = int((time.time() - start) * 1000)
            events.request_success.fire(request_type="grpc",
                                        name=task_name,
                                        response_time=total,
                                        response_length=5)
        return result
    return wrapper
My user behaviour is as follows.
Every 31 seconds the user should execute (behaviour 1):
ping_server_1
ping_server_2
ping_server_3
(Note that each function is different; they only have similar names.)
Every 43 seconds the user should execute (behaviour 2):
hello_server_1
hello_server_2
The two user actions should be independent, meaning the user may execute both at the same time (not really in parallel; the wait time between behaviour 1 and behaviour 2 should just be zero).
I wrote the following script, but nesting ping_server_1, ping_server_2, and ping_server_3 inside a task made Locust unable to show data for each of those subtasks:
from locust import TaskSet, between, task, User, events, HttpUser, constant, SequentialTaskSet
import random
import grpc
from google.protobuf import json_format
from client import PingClient
import time
from tools import grpctask


class TaskOne(SequentialTaskSet):
    @task
    class PingTest(SequentialTaskSet):
        host = "localhost:9999"

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.stub = None
            self.vacancy_id = None
            self.token = None
            self.ping_client = PingClient(host="localhost:9999")

        def on_start(self):
            self.connect_to_server()
            self.login()

        def connect_to_server(self):
            # use the ping client to connect to the server
            self.ping_client.connect_to_server()

        def login(self):
            # use the ping client to login
            self.ping_client.set_token()

        @task
        @grpctask
        def ping_server(self):
            self.ping_client.ping()

        @task
        @grpctask
        def ping_server_2(self):
            self.ping_client.ping()

        @task
        @grpctask
        def ping_server_3(self):
            self.ping_client.ping()
            self.interrupt()

    @task
    def empty(self):
        print("PingTest is empty")
        self.interrupt()


class TaskTwo(SequentialTaskSet):
    @task
    class HelloServer(TaskSet):
        host = "localhost:9999"

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.stub = None
            self.vacancy_id = None
            self.token = None
            self.ping_client = PingClient(host="localhost:9999")

        def on_start(self):
            self.connect_to_server()
            self.login()

        def connect_to_server(self):
            # use the ping client to connect to the server
            self.ping_client.connect_to_server()

        def login(self):
            # use the ping client to login
            self.ping_client.set_token()

        @task
        @grpctask
        def hello_server(self):
            self.ping_client.ping()

        @task
        @grpctask
        def hello_server_2(self):
            self.ping_client.ping()
            self.interrupt()

    @task
    def empty(self):
        print("TaskTwo is empty")
        self.interrupt()


class PingUser(User):
    # force TaskOne to be executed every 31 seconds,
    # and TaskTwo to be executed every 43 seconds
    tasks = [TaskOne, TaskTwo]
Is there a way to define a wait time for TaskOne and TaskTwo independently of each other?
If not, what can be done to achieve the user behaviour described above while still treating each function as a task, so that metrics are collected per function? (Writing each action as one big function won't give metrics for each function.)
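No answer is recorded here, but one possible direction (my own sketch, not from the original thread; PingClient and grpctask are carried over from the question) is to give each behaviour its own User class with its own fixed wait time. Locust spawns and schedules each User independently, and each wrapped helper still reports its own metric:

from locust import User, task, constant
from client import PingClient   # assumed helper from the question
from tools import grpctask      # the decorator shown above

class PingBehaviour(User):
    wait_time = constant(31)    # re-run the ping sequence roughly every 31 seconds

    def on_start(self):
        self.ping_client = PingClient(host="localhost:9999")
        self.ping_client.connect_to_server()
        self.ping_client.set_token()

    @task
    def ping_all(self):
        # each helper is wrapped separately, so Locust reports one entry per call
        self.ping_server_1()
        self.ping_server_2()
        self.ping_server_3()

    @grpctask
    def ping_server_1(self):
        self.ping_client.ping()

    @grpctask
    def ping_server_2(self):
        self.ping_client.ping()

    @grpctask
    def ping_server_3(self):
        self.ping_client.ping()

class HelloBehaviour(User):
    wait_time = constant(43)    # re-run the hello sequence roughly every 43 seconds

    def on_start(self):
        self.ping_client = PingClient(host="localhost:9999")
        self.ping_client.connect_to_server()
        self.ping_client.set_token()

    @task
    def hello_all(self):
        self.hello_server_1()
        self.hello_server_2()

    @grpctask
    def hello_server_1(self):
        self.ping_client.ping()

    @grpctask
    def hello_server_2(self):
        self.ping_client.ping()

Run with at least two users (one of each class) so the two behaviours overlap in time.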

Python decorator: log file not being generated

I am new to Python and learning logging techniques with decorators.
The code below is not generating the required log file for me. When I debug the code, the correct message reaches the logger statement, but the file is never created. From my test method I call the required function, where I have implemented the decorator. Please guide me on where I am making a mistake.
try:
    import csv
    import requests
    import datetime
    import os
    import sys
    import logging
except Exception as e:
    print("Some Modules are missing {}".format(e))


class Meta(type):
    """ Meta class"""

    def __call__(cls, *args, **kwargs):
        instance = super(Meta, cls).__call__(*args, **kwargs)
        return instance

    def __init__(cls, name, base, attr):
        super(Meta, cls).__init__(name, base, attr)


class log(object):
    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kwargs):
        """ Wrapper Function"""
        start = datetime.datetime.now()   # start time
        Tem = self.func(*args)            # call function
        Argument = args
        FunName = self.func.__name__      # get function name
        end = datetime.datetime.now()     # end time
        message = """
        Function       : {}
        Execution Time : {}
        Argument       : {}
        Memory         : {} Bytes
        Date           : {}
        """.format(FunName,
                   end - start,
                   Argument,
                   sys.getsizeof(self.func),
                   start)
        cwd = os.getcwd()
        folder = 'Logs'
        newPath = os.path.join(cwd, folder)
        try:
            """Try to create a folder """
            os.mkdir(newPath)
        except:
            """Folder already exists """
            logging.basicConfig(filename='apiRun.log'.format(newPath), level=logging.DEBUG)
            logging.debug(message)
        return Tem


class APIHelper(metaclass=Meta):
    def __init__(self, *args, **kwargs):
        pass

    @log
    def star_wars_characters(url):
        # self.url = url
        api_response = requests.get(url)
        people = []
        if api_response.status_code == 200:
            data = api_response.json()
            for d in data['results']:
                character = []
                character.append(d['name'])
                character.append(d['height'])
                character.append(d['gender'])
                people.append(character)
            return people
        else:
            return "Bad Request"
My Test Method:
import unittest
import csv
from com.Script.APIHelper import APIHelper


class TestAPI(unittest.TestCase):
    def _setUp(self, file_name):
        self.api = APIHelper()
        with open(file_name, "w") as self.fd:
            self.csvfile = csv.writer(self.fd, delimiter=',')
            self.csvfile.writerow(['Name', 'Height', 'Gender'])

    def tearDown(self):
        self.fd.close()

    def test_responseNotEmpty(self):
        file_name = 'SWAPI.csv'
        self._setUp(file_name)
        people = self.api.star_wars_characters("https://swapi.dev/api/people/")
        assert type(people) is list
Thank you in advance.
Add a finally clause, and change filename='apiRun.log' to filename='{}/apiRun.log':
try:
    """Try to create a folder """
    os.mkdir(newPath)
except:
    """Folder already exists """
finally:
    logging.basicConfig(filename='{}/apiRun.log'.format(newPath), level=logging.DEBUG)
    logging.debug(message)
except is executed only when an exception is raised in the try block; finally is always executed.
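As an aside (my own suggestion, not part of the original answer), the try/except dance can be avoided entirely: os.makedirs with exist_ok=True creates the folder only when it is missing, and the log path can be joined portably:

import logging
import os

newPath = os.path.join(os.getcwd(), 'Logs')
os.makedirs(newPath, exist_ok=True)   # no exception if the folder already exists

logging.basicConfig(filename=os.path.join(newPath, 'apiRun.log'),
                    level=logging.DEBUG)
logging.debug('decorator message goes here')

Also note that logging.basicConfig only has an effect the first time it is called in a process, so it is better to configure logging once at startup than inside the decorator.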

Check how long a method runs in real time

I have a specific task: I need to know how long a method runs in real time, and if it runs too long, I need to raise an exception.
This is my test method:
@timer
def get_mails(self):
    print("start method")
    max_sec1 = 7
    current_sec1 = 0
    while max_sec1 != current_sec1:
        time.sleep(1)
        current_sec1 += 1
    print("method is finish")
Below is the decorator I wrote; it controls how long the function has been running:
class TickTack(threading.Thread):
    def __init__(self, func):
        super(TickTack, self).__init__()
        self._stop_event = threading.Event()
        self.func = func

    def run(self):
        self.run_function()

    def run_function(self):
        self.func(self)

    def terminate(self):
        self._stop_event.set()


def timer(func):
    @functools.wraps(func)
    def wrapper(*args):
        print("start wrapper")
        max_sec = 3  # max work sec
        current_sec = 0
        tick_tack_thread = TickTack(func)
        tick_tack_thread.start()
        while max_sec != current_sec:
            time.sleep(1)
            current_sec += 1
            print(current_sec)
            if max_sec == current_sec:
                tick_tack_thread.terminate()  # stop the thread!!! But it does not work
                raise Exception("Method work so long")
        print("end wrapper")
    return wrapper
This code does not stop the get_mails function, but I need it to be stopped (an exception raised in the decorated method, get_mails in my case).
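No answer is recorded here, but some context: a plain Python thread cannot be killed from the outside, and the _stop_event only helps if the target function checks it between steps. One common workaround on Unix (a sketch of my own, works only in the main thread) is signal.alarm, which interrupts the running method with an exception after a deadline:

import signal
import time
from functools import wraps

class MethodTimeout(Exception):
    pass

def timer(max_sec):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            def _handler(signum, frame):
                raise MethodTimeout("method ran longer than %d seconds" % max_sec)
            old_handler = signal.signal(signal.SIGALRM, _handler)
            signal.alarm(max_sec)      # deliver SIGALRM after max_sec seconds
            try:
                return func(*args, **kwargs)
            finally:
                signal.alarm(0)        # cancel any pending alarm
                signal.signal(signal.SIGALRM, old_handler)
        return wrapper
    return decorator

@timer(3)
def get_mails():
    time.sleep(7)   # stands in for the slow polling loop

get_mails()   # raises MethodTimeout after 3 seconds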

Start python Process with output and timeout

I'm trying to find a way to start a new Process and get its output if it takes less than X seconds. If the process takes more time, I would like to ignore the result, kill the Process, and carry on.
I basically need to add a timer to the code below. Not sure if there's a better way to do it; I'm open to a different and better solution.
from multiprocessing import Process, Queue

def f(q):
    # Ugly work
    q.put(['hello', 'world'])

if __name__ == '__main__':
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print q.get()
    p.join()
Thanks!
You may find the following module useful in your case:
Module
#! /usr/bin/env python3
"""Allow functions to be wrapped in a timeout API.

Since code can take a long time to run and may need to terminate before
finishing, this module provides a set_timeout decorator to wrap functions."""
__author__ = 'Stephen "Zero" Chappell ' \
             '<stephen.paul.chappell@atlantis-zero.net>'
__date__ = '18 December 2017'
__version__ = 1, 0, 1
__all__ = [
    'set_timeout',
    'run_with_timeout'
]

import multiprocessing
import sys
import time

DEFAULT_TIMEOUT = 60


def set_timeout(limit=None):
    """Return a wrapper that provides a timeout API for callers."""
    if limit is None:
        limit = DEFAULT_TIMEOUT
    _Timeout.validate_limit(limit)

    def wrapper(entry_point):
        return _Timeout(entry_point, limit)
    return wrapper


def run_with_timeout(limit, polling_interval, entry_point, *args, **kwargs):
    """Execute a callable object and automatically poll for results."""
    engine = set_timeout(limit)(entry_point)
    engine(*args, **kwargs)
    while engine.ready is False:
        time.sleep(polling_interval)
    return engine.value


def _target(queue, entry_point, *args, **kwargs):
    """Help with multiprocessing calls by being a top-level module function."""
    # noinspection PyPep8,PyBroadException
    try:
        queue.put((True, entry_point(*args, **kwargs)))
    except:
        queue.put((False, sys.exc_info()[1]))


class _Timeout:
    """_Timeout(entry_point, limit) -> _Timeout instance"""

    def __init__(self, entry_point, limit):
        """Initialize the _Timeout instance with all needed attributes."""
        self.__entry_point = entry_point
        self.__limit = limit
        self.__queue = multiprocessing.Queue()
        self.__process = multiprocessing.Process()
        self.__timeout = time.monotonic()

    def __call__(self, *args, **kwargs):
        """Begin execution of the entry point in a separate process."""
        self.cancel()
        self.__queue = multiprocessing.Queue(1)
        self.__process = multiprocessing.Process(
            target=_target,
            args=(self.__queue, self.__entry_point) + args,
            kwargs=kwargs
        )
        self.__process.daemon = True
        self.__process.start()
        self.__timeout = time.monotonic() + self.__limit

    def cancel(self):
        """Terminate execution if possible."""
        if self.__process.is_alive():
            self.__process.terminate()

    @property
    def ready(self):
        """Property letting callers know if a returned value is available."""
        if self.__queue.full():
            return True
        elif not self.__queue.empty():
            return True
        elif self.__timeout < time.monotonic():
            self.cancel()
        else:
            return False

    @property
    def value(self):
        """Property that retrieves a returned value if available."""
        if self.ready is True:
            valid, value = self.__queue.get()
            if valid:
                return value
            raise value
        raise TimeoutError('execution timed out before terminating')

    @property
    def limit(self):
        """Property controlling what the timeout period is in seconds."""
        return self.__limit

    @limit.setter
    def limit(self, value):
        self.validate_limit(value)
        self.__limit = value

    @staticmethod
    def validate_limit(value):
        """Verify that the limit's value is not too low."""
        if value <= 0:
            raise ValueError('limit must be greater than zero')
To use it, see the following example:
Example
from time import sleep


def main():
    timeout_after_four_seconds = set_timeout(4)
    # create copies of a function that have a timeout
    a = timeout_after_four_seconds(do_something)
    b = timeout_after_four_seconds(do_something)
    c = timeout_after_four_seconds(do_something)
    # execute the functions in separate processes
    a('Hello', 1)
    b('World', 5)
    c('Jacob', 3)
    # poll the functions to find out what they returned
    results = [a, b, c]
    polling = set(results)
    while polling:
        for process, name in zip(results, 'abc'):
            if process in polling:
                ready = process.ready
                if ready is True:  # if the function returned
                    print(name, 'returned', process.value)
                    polling.remove(process)
                elif ready is None:  # if the function took too long
                    print(name, 'reached timeout')
                    polling.remove(process)
                else:  # if the function is running
                    assert ready is False, 'ready must be True, False, or None'
        sleep(0.1)
    print('Done.')


def do_something(data, work):
    sleep(work)
    print(data)
    return work


if __name__ == '__main__':
    main()
Does the process you are running involve a loop?
If so, you can record a timestamp before starting the loop and include an if statement within the loop that calls sys.exit() to terminate the script if the current timestamp differs from the recorded start timestamp by more than x seconds.
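A minimal sketch of that pattern (my own illustration of the suggestion above):

import sys
import time

MAX_SECONDS = 5
start = time.time()

while True:
    # ... one chunk of the real work goes here ...
    time.sleep(0.1)
    if time.time() - start > MAX_SECONDS:
        sys.exit("timed out after %d seconds" % MAX_SECONDS)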
All you need to do to adapt the Queue example from the docs to your case is to pass the timeout to the q.get() call and terminate the process on timeout:
from Queue import Empty

...

try:
    print q.get(timeout=timeout)
except Empty:  # no value, timeout occurred
    p.terminate()
    q = None   # the queue might be corrupted after the `terminate()` call
p.join()
Using a Pipe might be more lightweight; otherwise the code is the same (you could use .poll(timeout) to find out whether there is data to receive).
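For completeness, a rough sketch of the Pipe variant (my own, mirroring the Queue snippet above):

from multiprocessing import Process, Pipe

TIMEOUT = 5.0

def f(conn):
    # ugly work
    conn.send(['hello', 'world'])
    conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = Pipe(duplex=False)  # parent end receives, child end sends
    p = Process(target=f, args=(child_conn,))
    p.start()
    if parent_conn.poll(TIMEOUT):   # wait up to TIMEOUT seconds for data
        print(parent_conn.recv())
    else:                           # nothing arrived in time: give up and kill the child
        p.terminate()
    p.join()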

How do I spawn threads from two different objects and coordinate them in Python 2.7?

I am trying to combine the answers I got to two different Python questions.
Here is the first question and its answer. Basically, I just wanted to spawn two threads, one to powerDown() and the other to powerUp(), where powerUp() waits on powerDown():
How to spawn a thread inside another thread in the same object in python?
import threading

class Server(threading.Thread):
    # some code

    def run(self):
        self.reboot()

    # This is the top level function called by other objects
    def reboot(self):
        # perhaps add a lock
        if not hasattr(self, "_down"):
            self._down = threading.Thread(target=self.__powerDown)
            self._down.start()
            up = threading.Thread(target=self.__powerUp)
            up.start()

    def __powerDown(self):
        # do something
        pass

    def __powerUp(self):
        if not hasattr(self, "_down"):
            return
        self._down.join()
        # do something
        del self._down
Here is the second question and its answer. Basically, I wanted to start a thread and then call a function on the running object:
How to call a function on a running Python thread
import queue
import threading

class SomeClass(threading.Thread):
    def __init__(self, q, loop_time=1.0/60):
        self.q = q
        self.timeout = loop_time
        super(SomeClass, self).__init__()

    def onThread(self, function, *args, **kwargs):
        self.q.put((function, args, kwargs))

    def run(self):
        while True:
            try:
                function, args, kwargs = self.q.get(timeout=self.timeout)
                function(*args, **kwargs)
            except queue.Empty:
                self.idle()

    def idle(self):
        # put the code you would have put in the `run` loop here
        pass

    def doSomething(self):
        pass

    def doSomethingElse(self):
        pass
Here is the combined code. Basically, I wanted to spawn a thread and then queue up functions to execute, in this case reboot(). reboot() in turn creates two threads, the powerDown() and powerUp() threads, where powerUp() waits on powerDown():
import threading
import Queue

class Server(threading.Thread):
    def __init__(self, q, loop_time=1.0/60):
        self.q = q
        self.timeout = loop_time
        super(Server, self).__init__()

    def run(self):
        while True:
            try:
                function, args, kwargs = self.q.get(timeout=self.timeout)
                function(*args, **kwargs)
            except queue.Empty:
                self.idle()

    def idle(self):
        # put the code you would have put in the `run` loop here
        pass

    # This is the top level function called by other objects
    def reboot(self):
        self.__onThread(self.__reboot)

    def __reboot(self):
        if not hasattr(self, "_down"):
            self._down = threading.Thread(target=self.__powerDown)
            self._down.start()
            up = threading.Thread(target=self.__powerUp)
            up.start()

    def __onThread(self, function, *args, **kwargs):
        self.q.put((function, args, kwargs))

    def __powerDown(self):
        # do something
        pass

    def __powerUp(self):
        if not hasattr(self, "_down"):
            return
        self._down.join()
        # do something
        del self._down
Everything works, except when I create two Server subclasses:
class ServerA(Server):
    pass

class ServerB(Server):
    pass
Here is the code that instantiates both subclasses and calls their start() and reboot() functions:
serverA = ServerA(None)
serverB = ServerB(None)
serverA.start()
serverB.start()
serverA.reboot()
serverB.reboot()
I expect serverA.reboot() and serverB.reboot() to happen concurrently, which is what I want, but they DO NOT! serverB.reboot() is executed after serverA.reboot() is done. That is, if I put print statements in, I get:
serverA started
serverB started
serverA.reboot() called
serverA.__powerDown called
serverA.__powerUp called
serverB.reboot() called
serverB.__powerDown called
serverB.__powerUp called
I know for a fact that it takes longer for ServerA to reboot, so I expect something like this:
serverA started
serverB started
serverA.reboot() called
serverB.reboot() called
serverA.__powerDown called
serverB.__powerDown called
serverB.__powerUp called
serverA.__powerUp called
I hope that makes sense. If it does, why aren't my reboot() functions happening simultaneously?
Why are you passing None when a queue object is expected in the first place? This causes an exception complaining that a NoneType object doesn't have a get method. Besides that, the exception you want to handle in the run method is Queue.Empty, not queue.Empty.
Here is the revised code and its output on my machine:
import threading
import Queue

class Server(threading.Thread):
    def __init__(self, title, q, loop_time=1.0/60):
        self.title = title
        self.q = q
        self.timeout = loop_time
        super(Server, self).__init__()

    def run(self):
        print "%s started" % self.title
        while True:
            try:
                function, args, kwargs = self.q.get(timeout=self.timeout)
                function(*args, **kwargs)
            except Queue.Empty:
                # print "empty"
                self.idle()

    def idle(self):
        pass
        # put the code you would have put in the `run` loop here

    # This is the top level function called by other objects
    def reboot(self):
        self.__onThread(self.__reboot)

    def __reboot(self):
        if not hasattr(self, "_down"):
            self._down = threading.Thread(target=self.__powerDown)
            self._down.start()
            up = threading.Thread(target=self.__powerUp)
            up.start()

    def __onThread(self, function, *args, **kwargs):
        self.q.put((function, args, kwargs))

    def __powerDown(self):
        # do something
        print "%s power down" % self.title

    def __powerUp(self):
        print "%s power up" % self.title
        if not hasattr(self, "_down"):
            return
        self._down.join()
        # do something
        del self._down


class ServerA(Server):
    pass


class ServerB(Server):
    pass


def main():
    serverA = ServerA("A", Queue.Queue())
    serverB = ServerB("B", Queue.Queue())
    serverA.start()
    serverB.start()
    serverA.reboot()
    serverB.reboot()


if __name__ == '__main__':
    main()
Output:
A started
B started
B power down
A power down
B power up
A power up
