How can I run a function for just 5 minutes? - python

import logging

from a.x.models import X
from a.x.management.commands.syncx import Command as SyncCommand
from a.x.adapter_classes import ADAPTER_CLASSES

LOGGER = logging.getLogger(__name__)

def logger_function(code):
    if not X.objects.filter(code=code).exists():
        X.objects.create(code=code)
        LOGGER.info(f"{X} created")
    args = []
    kwargs = {'x_code': code,
              'class': False,
              'database': 'default'}
    try:
        LOGGER.info(f"Starting syncx command for {code}")
        # this command needs to run for just 5 minutes for every key
        SyncCommand().handle(*args, **kwargs)
        LOGGER.info(f"There is no error for {code}")
    except Exception as error:
        with open("logger.txt", "a") as file:
            file.write(f"{code}'s error is: {error}")
        LOGGER.info(f"Logging error about {code}\n")

def run():
    for key in ADAPTER_CLASSES.keys():
        # this function needs to run for just 5 minutes for every key
        logger_function(key)
My logger_function needs to run for at most 5 minutes. Is there a timer decorator, or a way to kill a thread on a timer? How can I do this?
My for loop steps through the keys and passes each one to logger_function. If the try/except block catches a problem, that is fine, but when everything goes right my SyncCommand can take many hours, and I only want to log errors from the first 5 minutes.

Is there any timer decorator
If you are allowed to use external libraries, I suggest taking a look at timeout-decorator.
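A minimal sketch of how that could look here (assuming pip install timeout-decorator and its signal-based default, i.e. Unix and the main thread; logger_function_with_limit is just an illustrative wrapper name):

import timeout_decorator

@timeout_decorator.timeout(300)  # raise timeout_decorator.TimeoutError after 300 s
def logger_function_with_limit(code):
    logger_function(code)

for key in ADAPTER_CLASSES.keys():
    try:
        logger_function_with_limit(key)
    except timeout_decorator.TimeoutError:
        # ran cleanly past the 5-minute window; move on. Note that the
        # except Exception inside logger_function may catch the timeout
        # first and log it there instead.
        pass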

The timeit module measures how long a snippet takes to execute (note that it measures running time, it does not limit it):

# importing the required module
import timeit

# code snippet to be executed only once
mysetup = "from math import sqrt"

# code snippet whose execution time is to be measured
mycode = '''
def example():
    mylist = []
    for x in range(100):
        mylist.append(sqrt(x))
'''

# timeit statement
print(timeit.timeit(setup=mysetup, stmt=mycode, number=10000))

I solved it using the signal library:

import signal

def handler(signum, frame):
    raise Exception("timed out after 5 minutes")

# do stuff
for key in ADAPTER_CLASSES.keys():
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(300)  # deliver SIGALRM in 5 min
    logger_function(key)
    signal.alarm(0)  # cancel the pending alarm

Note that signal.alarm is only available on Unix and must be used from the main thread.
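A portable alternative (my sketch, not from the thread): run each key in a separate process and kill the process when the 5 minutes are up. Unlike SIGALRM this also works on Windows, though logger_function must be importable/picklable:

from multiprocessing import Process

for key in ADAPTER_CLASSES.keys():
    p = Process(target=logger_function, args=(key,))
    p.start()
    p.join(300)        # wait at most 5 minutes
    if p.is_alive():   # SyncCommand is still running without errors
        p.terminate()  # stop watching it
        p.join()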

Related

How to get every second's GPU usage in Python

I have a model which runs on tensorflow-gpu and my device is Nvidia. I want to list every second's GPU usage so that I can measure average/max GPU usage. I can do this manually by opening two terminals, one to run the model and the other to measure with nvidia-smi -l 1. Of course, this is not a good way. I also tried to use a Thread to do it, shown here:
import subprocess as sp
import os
from threading import Thread

class MyThread(Thread):
    def __init__(self, func, args):
        super(MyThread, self).__init__()
        self.func = func
        self.args = args

    def run(self):
        self.result = self.func(*self.args)

    def get_result(self):
        return self.result

def get_gpu_memory():
    output_to_list = lambda x: x.decode('ascii').split('\n')[:-1]
    ACCEPTABLE_AVAILABLE_MEMORY = 1024
    COMMAND = "nvidia-smi -l 1 --query-gpu=memory.used --format=csv"
    memory_use_info = output_to_list(sp.check_output(COMMAND.split()))[1:]
    memory_use_values = [int(x.split()[0]) for i, x in enumerate(memory_use_info)]
    return memory_use_values

def run():
    pass

t1 = MyThread(run, args=())
t2 = MyThread(get_gpu_memory, args=())
t1.start()
t2.start()
t1.join()
t2.join()
res1 = t2.get_result()
However, this does not return every second's usage either. Is there a good solution?
In the command nvidia-smi -l 1 --query-gpu=memory.used --format=csv
the -l stands for:
-l, --loop=   Probe until Ctrl+C at specified second interval.
So the command:
COMMAND = 'nvidia-smi -l 1 --query-gpu=memory.used --format=csv'
sp.check_output(COMMAND.split())
will never terminate and return.
It works if you move the loop out of the command (nvidia-smi) and into Python.
Here is the code:
import subprocess as sp
import os
from threading import Thread, Timer
import sched, time

def get_gpu_memory():
    output_to_list = lambda x: x.decode('ascii').split('\n')[:-1]
    ACCEPTABLE_AVAILABLE_MEMORY = 1024
    COMMAND = "nvidia-smi --query-gpu=memory.used --format=csv"
    try:
        memory_use_info = output_to_list(sp.check_output(COMMAND.split(), stderr=sp.STDOUT))[1:]
    except sp.CalledProcessError as e:
        raise RuntimeError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    memory_use_values = [int(x.split()[0]) for i, x in enumerate(memory_use_info)]
    # print(memory_use_values)
    return memory_use_values

def print_gpu_memory_every_5secs():
    """
    This function calls itself every 5 secs and prints the gpu_memory.
    """
    Timer(5.0, print_gpu_memory_every_5secs).start()
    print(get_gpu_memory())

print_gpu_memory_every_5secs()
"""
Do stuff.
"""
Here is a more rudimentary way of getting this output, but just as effective, and I think easier to understand. I added a small 10-value cache to get a good recent average and upped the check time to every second. It outputs the average of the last 10 seconds and the current value each second, so operations that cause usage can be identified (which I think was the original question).
import subprocess as sp
import time

memory_total = 8192  # found with this command: nvidia-smi --query-gpu=memory.total --format=csv
memory_used_command = "nvidia-smi --query-gpu=memory.used --format=csv"
isolate_memory_value = lambda x: "".join(y for y in x.decode('ascii') if y in "0123456789")

def main():
    percentage_cache = []
    while True:
        memory_used = isolate_memory_value(sp.check_output(memory_used_command.split(), stderr=sp.STDOUT))
        percentage = float(memory_used) / float(memory_total) * 100
        percentage_cache.append(percentage)
        percentage_cache = percentage_cache[max(0, len(percentage_cache) - 10):]
        print("curr: " + str(percentage) + " %", "\navg: " + str(sum(percentage_cache)/len(percentage_cache))[:4] + " %\n")
        time.sleep(1)

main()
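As a further option (my addition, not from the original answers): the nvidia-ml-py package exposes the same counters through the NVML library, so you can poll GPU memory from Python without spawning a subprocess at all. A minimal sketch:

import time
import pynvml  # pip install nvidia-ml-py

pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(0)  # first GPU
for _ in range(10):
    info = pynvml.nvmlDeviceGetMemoryInfo(handle)  # .total/.free/.used in bytes
    print("used MiB:", info.used // (1024 * 1024))
    time.sleep(1)
pynvml.nvmlShutdown()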

Multiprocessing With r2pipe

I'm having issues using r2pipe, radare2's API, with the multiprocessing Pool.map function in Python. The problem I am facing is that the application hangs on pool.join().
My hope was to use multithreading via the multiprocessing.dummy class in order to evaluate functions quickly through r2pipe. I have tried passing my r2pipe object as a namespace using the Manager class. I have attempted using events as well, but none of these seem to work.
# imports inferred from the description above (multiprocessing.dummy for the
# thread pool and Manager, itertools.product for the argument pairs)
import r2pipe
from itertools import product
from multiprocessing.dummy import Pool as ThreadPool, Manager as ThreadMgr

class Test:
    def __init__(self, filename=None):
        if filename:
            self.r2 = r2pipe.open(filename)
        else:
            self.r2 = r2pipe.open()
        self.r2.cmd('aaa')

    def t_func(self, args):
        f = args[0]
        r2_ns = args[1]
        print('afbj @ {}'.format(f['name']))
        try:
            bb = r2_ns.cmdj('afbj @ {}'.format(f['name']))
            if bb:
                return bb[0]['addr']
            else:
                return None
        except Exception as e:
            print(e)
            return None

    def thread(self):
        funcs = self.r2.cmdj('aflj')
        mgr = ThreadMgr()
        ns = mgr.Namespace()
        ns.r2 = self.r2
        pool = ThreadPool(2)
        results = pool.map(self.t_func, product(funcs, [ns.r2]))
        pool.close()
        pool.join()
        print(list(results))
This is the class I am using. I make a call to the Test.thread function in my main function.
I expect the application to print out the command it is about to run in r2pipe (afbj @ entry0, etc.), and then to print out the list of results containing each function's first basic block address ([40000, 50000, ...]).
The application does print out the command about to run, but then hangs before printing out the results.
ENVIRONMENT
radare2: radare2 4.2.0-git 23712 @ linux-x86-64 git.4.1.1-97-g5a48a4017
commit: 5a48a401787c0eab31ecfb48bebf7cdfccb66e9b build: 2020-01-09__21:44:51
r2pipe: 1.4.2
python: Python 3.6.9 (default, Nov 7 2019, 10:44:02)
system: Ubuntu 18.04.3 LTS
SOLUTION
This may be due to passing the same instance of r2pipe.open() to every call of t_func in the pool. One solution is to move the following lines of code into t_func:
r2 = r2pipe.open('filename')
r2.cmd('aaa')
This works; however, it is terribly slow to re-analyze the binary for each thread/process.
Also, it is often faster to let radare2 do as much of the work as possible and limit the number of commands we need to send over r2pipe.
This problem is solved by using the command: afbj @@f
afbj  # list basic blocks of the given function and show the results in JSON
@@f   # execute the command for each function
EXAMPLE
Longer Example

import r2pipe

R2: r2pipe.open_sync = r2pipe.open('/bin/ls')
R2.cmd("aaaa")

FUNCS: list = R2.cmd('afbj @@f').split("\n")[:-1]
RESULTS: list = []
for func in FUNCS:
    basic_block_info: list = eval(func)
    first_block: dict = basic_block_info[0]
    address_first_block: int = first_block['addr']
    RESULTS.append(hex(address_first_block))
print(RESULTS)

'''
['0x4a56', '0x1636c', '0x3758', '0x15690', '0x15420', '0x154f0', '0x15420',
'0x154f0', '0x3780', '0x3790', '0x37a0', '0x37b0', '0x37c0', '0x37d0', '0x0',
...,
'0x3e90', '0x6210', '0x62f0', '0x8f60', '0x99e0', '0xa860', '0xc640', '0x3e70',
'0xd200', '0xd220', '0x133a0', '0x14480', '0x144e0', '0x145e0', '0x14840', '0x15cf0']
'''
Shorter Example

import r2pipe

R2 = r2pipe.open('/bin/ls')
R2.cmd("aaaa")
print([hex(eval(func)[0]['addr']) for func in R2.cmd('afbj @@f').split("\n")[:-1]])
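A variation on the shorter example (my own tweak, not from the original answer): since afbj emits JSON, json.loads parses each line without resorting to eval:

import json
import r2pipe

R2 = r2pipe.open('/bin/ls')
R2.cmd("aaaa")
print([hex(json.loads(line)[0]['addr'])
       for line in R2.cmd('afbj @@f').split("\n")[:-1]])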

How to write data to a file every 10 seconds

I'm a JS dev trying to learn a bit of Python while working on a Raspberry Pi 3 project that reads data from a Bluetooth temperature sensor.
I need to write the data to my file.txt every 10 seconds. How could I do that? I found a similar topic here (Run certain code every n seconds), but I don't know how to make it work in my current scenario.
#!/usr/bin/env python3

import argparse
import re
import logging
import sys
import time

from btlewrap import available_backends, BluepyBackend, GatttoolBackend, PygattBackend
from mitemp_bt.mitemp_bt_poller import MiTempBtPoller, \
    MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY

def valid_mitemp_mac(mac, pat=re.compile(r"4C:65:A8:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}")):
    """Check for valid mac addresses."""
    if not pat.match(mac.upper()):
        raise argparse.ArgumentTypeError('The MAC address "{}" seems to be in the wrong format'.format(mac))
    return mac

def poll(args):
    """Poll data from the sensor."""
    backend = _get_backend(args)
    poller = MiTempBtPoller(args.mac, backend)
    line1 = "Temperature: {}".format(poller.parameter_value(MI_TEMPERATURE))
    line2 = "Humidity: {}".format(poller.parameter_value(MI_HUMIDITY))
    print("Getting data from Mi Temperature and Humidity Sensor")
    print("FW: {}".format(poller.firmware_version()))
    print("Name: {}".format(poller.name()))
    print("Battery: {}".format(poller.parameter_value(MI_BATTERY)))
    print(line1)
    print(line2)
    f = open('file.txt', 'w')
    f.write("%s \n %s \n" % (line1, line2))
    f.close()

def _get_backend(args):
    """Extract the backend class from the command line arguments."""
    if args.backend == 'gatttool':
        backend = GatttoolBackend
    elif args.backend == 'bluepy':
        backend = BluepyBackend
    elif args.backend == 'pygatt':
        backend = PygattBackend
    else:
        raise Exception('unknown backend: {}'.format(args.backend))
    return backend

def list_backends(_):
    """List all available backends."""
    backends = [b.__name__ for b in available_backends()]
    print('\n'.join(backends))

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--backend', choices=['gatttool', 'bluepy', 'pygatt'], default='gatttool')
    parser.add_argument('-v', '--verbose', action='store_const', const=True)
    subparsers = parser.add_subparsers(help='sub-command help', )
    parser_poll = subparsers.add_parser('poll', help='poll data from a sensor')
    parser_poll.add_argument('mac', type=valid_mitemp_mac)
    parser_poll.set_defaults(func=poll)
    parser_scan = subparsers.add_parser('backends', help='list the available backends')
    parser_scan.set_defaults(func=list_backends)
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    if not hasattr(args, "func"):
        parser.print_help()
        sys.exit(0)
    args.func(args)

if __name__ == '__main__':
    main()
You can use the time module to pause the program for 10 seconds on each iteration:

from time import sleep

def func(n):
    print(n + 1)

for i in range(5):
    func(i)
    sleep(10)

>1
>2
>3
>4
>5
# (every 10 seconds)
However, this will block the rest of the program from running; a simple multi-threading script that calls the writing function would get around that.
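For instance, a small sketch of that multi-threading idea (write_data is a hypothetical stand-in for your file-writing code), using threading.Timer so the writes don't block the rest of the program:

import threading

def write_data():
    with open('file.txt', 'a') as f:
        f.write('hello\n')
    threading.Timer(10.0, write_data).start()  # schedule the next write in 10 s

write_data()  # first write; the rest of the program keeps running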
In relation to the code you are using: insert the sleep call within the poll function and wrap what you have there. If you want to loop the program 10 times:
def poll(args):
    """Poll data from the sensor."""
    for _ in range(10):
        # code things
        f = open('file.txt', 'a')  # << use append here or you will keep overwriting the file contents
        f.write('hello')
        f.close()
        sleep(10)
Or if you want it to run forever until you KeyboardInterrupt or exit somehow:
def poll(args):
    """Poll data from the sensor."""
    while True:
        # code things
        f = open('file.txt', 'a')  # << use append here or you will keep overwriting the file contents
        f.write('hello')
        f.close()
        sleep(10)
You need some kind of loop that polls your sensor; I do not see one glancing over your code. You have while and for loops in JS as well; look them up in http://docs.python.org/3/tutorial if you are unsure about the syntax.
Store the time of the last write in a variable, sleep a bit, poll the next value, check whether 10 s have passed, and write if so, else not (or simply sleep 10 s between polls if you do not want intermediate values printed).
Read up about loops:
for statement
looping techniques
import time

def poll():
    return time.time(), 42

last_write = None  # when did we record to file last?

# loop for as long as you want - while True would loop endlessly
for _ in range(7):
    t, c = poll()  # call poll() to get time and temperature from mocked data reader
    # check if enough time has passed
    if last_write is None or (t - last_write) > 2:  # check if new reading needed
        with open("t.txt", "a") as f:
            f.write(f"{t} {c}\n")
        last_write = t
        print("in file ", t, c)
    else:
        print("just output ", t, c)
    time.sleep(0.7)  # sleep some
Output:
in file 1552978725.5224085 42 # ...25.5
just output 1552978726.2232893 42 # ...26.2 - not 2s passed
just output 1552978726.9241226 42 # ...26.9 - not 2s passed
in file 1552978727.6249442 42 # ...27.6 - 2.1s passed
just output 1552978728.3259027 42 # ...28.3 - not 2s passed
just output 1552978729.0267787 42 # ...29.0 - not 2s passed
in file 1552978729.7275977 42 # ...29.7 - 2.1s passed
More remarks:
Use with open(filename, mode) as f: and scope the file operations below it; it will auto-close your file when the scope ends and will close the filehandle on exceptions as well.
Mode "w" truncates the file before writing to it; you might want to use append instead: reading and writing files

Python - For loop finishing before it is supposed to

I am currently executing tasks via a thread pool based on a for loop's length, and it is ending its execution before it is supposed to (before the end of the loop). Any ideas why? Here is the relevant code:
from classes.scraper import size
from multiprocessing import Pool
import threading

if __name__ == '__main__':
    print("Do something")
    size = size()
    pool = Pool(processes=50)
    with open('size.txt', 'r') as file:
        asf = file.read()
    for x in range(0, 1000000):
        if '{num:06d}'.format(num=x) in asf:
            continue
        else:
            res = pool.apply_async(size.scrape, ('{num:06d}'.format(num=x),))
Here is the console output (I am printing out the values inside size.scrape()).
...
...
...
013439
013440
013441
013442
013443
Process finished with exit code 0
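One likely cause, offered as an assumption rather than something from the thread: apply_async only schedules work, and when the main block reaches its end the script exits and the pool's daemonic workers are killed with tasks still queued. A minimal sketch of the usual fix, under the same setup as the snippet above:

results = []
for x in range(0, 1000000):
    code = '{num:06d}'.format(num=x)
    if code in asf:
        continue
    results.append(pool.apply_async(size.scrape, (code,)))

pool.close()  # no more task submissions
pool.join()   # block until every queued task has finished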

Python script setting breakpoints and the encounter of a comment

I am trying to build a script that sets breakpoints in winIDEA (this IDE will run a project that has various comments). The breakpoints must be set in winIDEA after a specific comment is recognised. I am somewhat new to this language and I am having problems writing this script. I am not sure if what I have here is good, but I am trying to get the line where the comment is recognised and then set a breakpoint in the program at that specific line.
import inspect
import logging
import isystem.connect as ic
import sys

connMgr = ic.ConnectionMgr()
connMgr.connectMRU('')
dbg = ic.CDebugFacade(connMgr)
bc = ic.CBreakpointController(connMgr)
exe = ic.CExecutionController(connMgr)

logging.basicConfig(
    format="%(levelname) -10s %(asctime)s %(message)s",
    level=logging.DEBUG)

def test():
    caller_list = []
    frame = inspect.currentframe()
    this_frame = frame  # Save current frame.
    while frame.f_back:
        print(frame)
        caller_list.append('{0}()'.format(frame.f_code.co_name))
        frame = frame.f_back
    caller_line = this_frame.f_back.f_lineno
    callers = '/'.join(reversed(caller_list))
    logging.info('Line {0} : {1}'.format(caller_line, callers))
    print(caller_line)

def foo():
    test()

def bar():
    foo()

test()

datafile = open(r'C:\Documents and Settings\stiral1\Desktop\function.py')
stringfile = datafile.read()
item_search = "haha"
counter = 0
print(stringfile.find(item_search, counter))
while counter != -1:
    print(stringfile.find(item_search, counter))
    bc.setBP(counter, r'C:\_DevTools\winIDEA\2012\Examples\Simulator\PPC\Simple\main.c')
Instead, I get a random line, and the position where I encounter my element in the string (at some point I turn the script that runs in the IDE into a string). I have no ideas left... Help a newbie!
This is what worked for me:

def wdSetBPComment(wdPrmStr):
    assert(wdGetNrOfSubStrings(wdPrmStr) == 2)
    comment = wdGetSubString_1(wdPrmStr)
    function = wdGetSubString_2(wdPrmStr)
    gl_tpLocation.setResourceName(function)
    gl_tpLocation.setSearch(ic.E_TRUE)
    gl_tpLocation.setMatchingType(ic.CTestLocation.E_MATCH_PLAIN)
    gl_tpLocation.setSearchPattern(comment)
    lineLocation = addrCtrl.getSourceLocation(gl_tpLocation)
    wdSetBreakpoint(lineLocation.getFileName() + lineLocation.getLineNumber())
    gl_bcCtrl.setBP(lineNo, fileName)
    return (gl_wdOkStr)
