Unable to handle exception in Django while implementing multithreading - Python

views.py:
from threading import Thread
from time import sleep
from django.http import HttpResponse

class PoliceJobs:

    def call_police_defence_jobs(request):
        job = PoliceDefenceJobs.police_jobs(request)
        sleep(0.5)
        job_details = PoliceDefenceJobDetails.police_defence_job_details(request)
        message = call_all(job, job_details)
        return HttpResponse(message)

    def call_statewise_police_jobs(request):
        job = PoliceDefenceJobs.statewise_police_jobs(request)
        sleep(0.5)
        job_details = PoliceDefenceJobDetails.statewise_police_job_details(request)
        message = call_all(job, job_details)
        return HttpResponse(message)

def police_jobs(request):
    try:
        t1 = Thread(target=PoliceJobs.call_police_defence_jobs, args=[request])
        t2 = Thread(target=PoliceJobs.call_statewise_police_jobs, args=[request])
        t1.start()
        t2.start()
        t1.join()
        t2.join()
        return HttpResponse("success")
    except:
        return HttpResponse("error")
urls.py:
from django.urls import path
from .views import police_jobs

urlpatterns = [
    path('finish_police_jobs/', police_jobs),
]
Error in shell:
Traceback (most recent call last):
  File "/usr/lib/python3.5/threading.py", line 914, in _bootstrap_inner
    self.run()
  File "/usr/lib/python3.5/threading.py", line 862, in run
    self._target(*self._args, **self._kwargs)
  File "/home/soubhagya/Desktop/carrier-circle/backend/finalize/views.py", line 840, in call_police_defence_jobs
    job = PoliceDefenceJobs.police_jobs(request)
AttributeError: type object 'PoliceDefenceJobs' has no attribute 'police_jobs'
In the PoliceJobs class I deliberately changed the call to PoliceDefenceJobs.police_jobs, which does not exist, in order to force an error.
So I am raising this error on purpose and handling it with the except block, but the traceback still shows up in the console and never reaches the browser:
the browser shows "success" whether or not there is an exception.

Exceptions in threads don't propagate to the thread that created them. See Catch a thread's exception in the caller thread in Python for a workaround.
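For completeness, here is a minimal sketch of that workaround, assuming the goal is for the view's try/except to see the worker's exception: subclass Thread, record any exception raised in run(), and re-raise it from join(). The name PropagatingThread is illustrative, not from the linked answer.
from threading import Thread

class PropagatingThread(Thread):
    def run(self):
        # Run the target as usual, but stash any exception instead of
        # letting the threading module print it to the console.
        self.exc = None
        try:
            super().run()
        except Exception as e:
            self.exc = e

    def join(self, timeout=None):
        super().join(timeout)
        if self.exc:
            raise self.exc  # re-raised in the caller's thread
With t1 = PropagatingThread(...) and t2 = PropagatingThread(...), the AttributeError raised inside call_police_defence_jobs surfaces at t1.join(), the except block in police_jobs runs, and the browser receives "error" instead of "success".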

Related

Python APScheduler throwing exception after removing job

I am adding a job to Redis, and on completion of the job I have added an event handler.
In the event handler I check the return value, and based on it I remove the job id from the jobstore. The job is removed successfully, but immediately afterwards an exception is thrown.
Code:
from datetime import datetime
import time
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.events import EVENT_JOB_EXECUTED
import logging

logging.basicConfig()

scheduler = BackgroundScheduler()
scheduler.add_jobstore('redis')
scheduler.start()

def tick():
    print('Tick! The time is: %s' % datetime.now())
    return 'success'

def removing_jobs(event):
    if event.retval == 'success':
        scheduler.remove_job(event.job_id)

scheduler.add_listener(removing_jobs, EVENT_JOB_EXECUTED)

try:
    count = 0
    while True:
        count += 1
        time.sleep(10)
        job_ret = scheduler.add_job(tick, 'interval', id=str(count), seconds=10)
except (KeyboardInterrupt, SystemExit):
    scheduler.shutdown()
Exception:
Exception in thread APScheduler:
Traceback (most recent call last):
  File "/usr/lib/python3.5/threading.py", line 914, in _bootstrap_inner
    self.run()
  File "/usr/lib/python3.5/threading.py", line 862, in run
    self._target(*self._args, **self._kwargs)
  File "/.virtualenvs/py3/lib/python3.5/site-packages/apscheduler/schedulers/blocking.py", line 30, in _main_loop
    wait_seconds = self._process_jobs()
  File "/.virtualenvs/py3/lib/python3.5/site-packages/apscheduler/schedulers/base.py", line 995, in _process_jobs
    jobstore.update_job(job)
  File "/.virtualenvs/py3/lib/python3.5/site-packages/apscheduler/jobstores/redis.py", line 91, in update_job
    raise JobLookupError(job.id)
apscheduler.jobstores.base.JobLookupError: 'No job by the id of 1 was found'
In short: you are removing the job while it is being processed,
so you should remove the job outside of its execution.
The scheduler doesn't know what the job's execution will do; it launches tick and sends a job object to the Redis jobstore, expecting it to be executed again. Before that happens, the EVENT_JOB_EXECUTED listener runs removing_jobs.
The problem is that when the Redis jobstore fetches the job to update its status, the job has already been deleted, so it raises the JobLookupError.
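One way to apply that advice is sketched below; the queue-based deferral and the names (finished, mark_finished) are mine, not from the original answer. The listener only records which job ids finished, and the removal happens in the main thread; JobLookupError is still caught defensively, since a small window remains between execution and the jobstore update.
from datetime import datetime
from queue import Queue
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.events import EVENT_JOB_EXECUTED
from apscheduler.jobstores.base import JobLookupError

scheduler = BackgroundScheduler()
scheduler.add_jobstore('redis')  # as in the question
scheduler.start()
finished = Queue()  # ids of jobs that reported success

def tick():
    print('Tick! The time is: %s' % datetime.now())
    return 'success'

def mark_finished(event):
    if event.retval == 'success':
        finished.put(event.job_id)  # defer: don't touch the jobstore here

scheduler.add_listener(mark_finished, EVENT_JOB_EXECUTED)
scheduler.add_job(tick, 'interval', id='1', seconds=10)

try:
    while True:
        job_id = finished.get()  # blocks until a job reports success
        try:
            scheduler.remove_job(job_id)  # removal now happens outside execution
        except JobLookupError:
            pass  # already gone; nothing to do
except (KeyboardInterrupt, SystemExit):
    scheduler.shutdown()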

Flask object has no attribute app_context

I'm trying to send emails periodically with Flask-Mail, but I'm stuck with this error: Flask object has no attribute app_context
import time
import threading
from flask import current_app
from flask_mail import Mail, Message

def mail_periodic():
    print "sending mail at " + time.ctime()
    app = current_app._get_current_object()
    msg = Message("no-reply: Avantgarde.Rentals",
                  sender="avantgarde.rentals.noreply@gmail.com",
                  )
    msg.add_recipient('aladinne.k@gmail.com')
    msg.body = 'Email periodic '
    mail2 = Mail(app)
    with app.app_context():
        mail2.send(msg)
    print "email sent "
    threading.Timer(5, mail_periodic).start()

@app.route('/startcronemailing')
def startcronemailing():
    try:
        mail_periodic()
    except Exception, exception:
        return exception.message
    return "crone mailing started"
The exception that I get:
Exception in thread Thread-3:
Traceback (most recent call last):
  File "C:\Python27\lib\threading.py", line 801, in __bootstrap_inner
    self.run()
  File "C:\Python27\lib\threading.py", line 1073, in run
    self.function(*self.args, **self.kwargs)
  File "app.py", line 113, in mail_periodic
    host_link='http://' + request.host,
  File "C:\Python27\lib\site-packages\werkzeug\local.py", line 336, in __getattr__
    return getattr(self._get_current_object(), name)
  File "C:\Python27\lib\site-packages\werkzeug\local.py", line 295, in _get_current_object
    return self.__local()
  File "C:\Python27\lib\site-packages\flask\globals.py", line 19, in _lookup_object
    raise RuntimeError('working outside of request context')
RuntimeError: working outside of request context
Please note that even if I use another mailing service like SendGrid, I get the same error.
You have to pass the app instance as an argument. If you use current_app._get_current_object() to get the app instance inside the target function, you will not get the right app in the other thread. For example:
from threading import Thread
from flask import current_app
from flask_mail import Message
from bluelog.extensions import mail

def _send_async_mail(app, message):  # target function
    with app.app_context():
        mail.send(message)

def send_async_mail(subject, to, html):
    app = current_app._get_current_object()  # get the real app instance
    message = Message(subject, recipients=[to], html=html)
    thr = Thread(target=_send_async_mail, args=[app, message])  # pass app
    thr.start()
    return thr
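A hypothetical usage sketch (the route, recipient, and body are illustrative, not from the original answer; a Flask app object named app is assumed): because the view runs inside a request context, current_app resolves to the real app before the worker thread starts.
@app.route('/register')
def register():
    # current_app is bound here, so send_async_mail can capture the
    # real app object and hand it to the background thread.
    send_async_mail('Welcome', 'user@example.com', '<p>Hello!</p>')
    return 'mail scheduled'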

How to handle Python's threading.Thread exceptions?

I'm using Selenium to run two Firefox instances and fetch specific pages.
I'm loading the pages in parallel, using a threading.Thread for each of them.
I also want to set a timeout for the maximum page loading time, with browser.set_page_load_timeout() in my code.
My whole code looks like this:
from selenium import webdriver
from threading import Thread
from selenium.common.exceptions import TimeoutException

class Test():
    def __init__(self):
        browser = webdriver.Firefox()

        def load_page(browser, url):
            browser.set_page_load_timeout(20)
            browser.get(url)

        t = Thread(target=load_page, args=(browser, 'http://www.stackoverflow.com', ))
        t.start()
        t.join()

if __name__ == '__main__':
    try:
        Test()
    except TimeoutException:
        print "timeout reached"
In spite of my try/except declaration, I still get this error:
Exception in thread Thread-1:
Traceback (most recent call last):
  File "C:\Python27\lib\threading.py", line 801, in __bootstrap_inner
    self.run()
  File "C:\Python27\lib\threading.py", line 754, in run
    self.__target(*self.__args, **self.__kwargs)
  File "C:\Temp\test_b.py", line 13, in load_page
    browser.get(url)
  File "C:\Python27\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 213, in get
    self.execute(Command.GET, {'url': url})
  File "C:\Python27\lib\site-packages\selenium\webdriver\remote\webdriver.py", line 201, in execute
    self.error_handler.check_response(response)
  File "C:\Python27\lib\site-packages\selenium\webdriver\remote\errorhandler.py", line 181, in check_response
    raise exception_class(message, screen, stacktrace)
TimeoutException: Message: Timed out waiting for page load.
Stacktrace:
    at Utils.initWebLoadingListener/< (file:///c:/users/mgal/appdata/local/temp/tmpsasxck/extensions/fxdriver@googlecode.com/components/driver-component.js:9010)
    at WebLoadingListener/e (file:///c:/users/mgal/appdata/local/temp/tmpsasxck/extensions/fxdriver@googlecode.com/components/driver-component.js:5114)
    at WebLoadingListener/< (file:///c:/users/mgal/appdata/local/temp/tmpsasxck/extensions/fxdriver@googlecode.com/components/driver-component.js:5122)
    at fxdriver.Timer.prototype.setTimeout/<.notify (file:///c:/users/mgal/appdata/local/temp/tmpsasxck/extensions/fxdriver@googlecode.com/components/driver-component.js:621)
In conclusion: how can I catch the TimeoutException outside of the thread's scope?
Thanks ahead!
You can't. You should handle the exception in the thread, so your function should look more or less like this:
def load_page(browser, url):
    try:
        browser.set_page_load_timeout(20)
        browser.get(url)
    except TimeoutException:
        '''Handle me here'''
EDIT: What you are actually asking for is:
from selenium import webdriver
from threading import Thread
from Queue import Queue
from selenium.common.exceptions import TimeoutException

class Test():
    def __init__(self, queue, url):
        browser = webdriver.Firefox()

        def load_page(browser, url):
            try:
                browser.set_page_load_timeout(20)
                browser.get(url)
            except Exception as e:
                queue.put(e)
            else:
                queue.put('OK or whatever the result you want')

        t = Thread(target=load_page, args=(browser, url, ))
        t.start()

if __name__ == '__main__':
    urls = ('http://www.stackoverflow.com', 'http://meta.stackoverflow.com/')
    queue = Queue()
    for url in urls:
        Test(queue, url)
    for i in range(len(urls)):
        result = queue.get()
        if isinstance(result, Exception):
            '''Handle exception preferably trying to determine the actual exception type'''
        else:
            '''Say cool cause everything is fine'''
Threads run on their own stacks, so exceptions don't cross thread boundaries without message/event passing. You can use Python's Queue library (which is thread-safe) and pass a queue object into your child function, using it as an event pool that the parent can drain.
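On Python 3 (or Python 2 with the futures backport), concurrent.futures gives the same effect with less plumbing; this is a sketch of that alternative, not part of the original answer. A Future stores whatever the worker raises and re-raises it in the caller when result() is called.
from concurrent.futures import ThreadPoolExecutor
from selenium import webdriver
from selenium.common.exceptions import TimeoutException

def load_page(browser, url):
    browser.set_page_load_timeout(20)
    browser.get(url)  # any exception raised here is stored in the Future

browser = webdriver.Firefox()
with ThreadPoolExecutor(max_workers=1) as pool:
    future = pool.submit(load_page, browser, 'http://www.stackoverflow.com')
    try:
        future.result()  # the thread's TimeoutException surfaces here
    except TimeoutException:
        print('timeout reached')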

Python: '_MainThread' object has no attribute '_state'

Hey guys, I am creating an application which takes in a request from the user. The main class on the server side is the Controller. I spawn a thread during __init__ which keeps actively listening for requests from the client (I need to spawn a thread here).
Once I get a request, I look at the type of request and call a function to handle it.
In that function, I want to create multiple processes to utilise my 8 cores effectively.
Here is the code:
from ryu.base import app_manager
from ryu.ofproto import ofproto_v1_3
from ryu.lib import hub
import multiprocessing as mp

class Controller(app_manager.RyuApp):
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        self.datapaths = {}
        self.monitor_thread = hub.spawn(self._monitor)
        super(Controller, self).__init__(*args, **kwargs)

    def _monitor(self):
        global connstream
        while True:
            # Get connection from client
            data = connstream.read(15000)
            data = eval(data)
            print "Received a request from the client:-", data
            for key, value in data.iteritems():
                type = int(key)
                request = value
                if type == 4:
                    self.get_route(type, request, connstream)

    def get_route(self, type, request, connection):
        global get_route_result
        cities = request['Cities']
        number_of_cities = request['Number_of_Cities']
        city_count = 0
        processes = []
        pool = mp.Pool(processes=8)
        for city, destination_ip in cities.iteritems():
            args = (type, destination_ip)
            processes.append(args)
            city_count = city_count + 1
            if city_count == number_of_cities:
                break
        pool.map(self.get_route_process, processes)

    def get_route_process(self, HOST, destination):
        # Do something
        pass
But the error I get is:
Exception in thread Thread-1:
Traceback (most recent call last):
  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
    self.run()
  File "/usr/lib/python2.7/threading.py", line 763, in run
    self.__target(*self.__args, **self.__kwargs)
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 325, in _handle_workers
    while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
AttributeError: '_MainThread' object has no attribute '_state'
So, in a nutshell: I create a thread which tries to create multiple processes, but the code fails.

AttributeError: 'DisabledBackend' object has no attribute '_get_task_meta_for'

I am trying to read meta info from a celery task in case of a timeout (i.e. if the task is not finished in the given time). I have 3 celery workers. When I execute tasks on the 3 workers serially, my timeout logic (getting meta info from the redis backend) works fine. But when I execute tasks in parallel using threads, I get the error 'AttributeError: 'DisabledBackend' object has no attribute '_get_task_meta_for''.
Main script:
from threading import Thread
from util.tasks import app
from celery.exceptions import TimeoutError
# from celery.task.control import revoke
from celery.result import AsyncResult

def run(cmd, workerName, async=False, timeout=9999999):
    print "Executing Celery cmd: ", cmd
    ret = app.send_task(workerName + '.run_cmd', args=[cmd], kwargs={}, queue=workerName)
    if async:
        return ret
    else:
        try:
            return ret.get(timeout=timeout)
        except TimeoutError:
            task = AsyncResult(ret.task_id)
            # print task.info
            out = task.info['PROGRESS']
            # stop_task(ret.task_id)
            print 'TIMEOUT', out
            return 'TIMEOUT', out

cmd = r'ping 10.10.10.10'
threads = []

# this block works
print "This block works"
run(cmd, 'MH_VTF203', timeout=10)
run(cmd, 'MH_VTF1661', timeout=10)
run(cmd, 'MH_VTF106', timeout=10)

# this block errors
print "This block erros"
for vtf in ['MH_VTF203', 'MH_VTF1661', 'MH_VTF106']:
    t = Thread(target=run, args=[cmd, vtf], kwargs={'timeout': 10})
    t.start()
    threads.append(t)
for t in threads:
    t.join()
util/tasks.py:
from celery import Celery
import subprocess

app = Celery('tasks', backend='redis://', broker='redis://localhost:6379/0')
app.conf.CELERY_IGNORE_RESULT = False
app.conf.CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'

@app.task()
def run_cmd(*args, **kwargs):
    cmd = " ".join(args)
    print "executing command :", cmd
    try:
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out = ""
        while p.poll() is None:
            l = p.stdout.readline()
            print l
            out += l
            run_cmd.update_state(
                state='PROGRESS',
                meta={'PROGRESS': out}
            )
        l = p.stdout.read()
        print l
        out += l
        return out
    except subprocess.CalledProcessError, e:
        print 'Error executing command: ', cmd
        return str(e)
Output:
C:\Python27\python.exe C:/Users/mkr/Documents/work/New_RoD/testing/run.py
This block works
Executing Celery cmd: ping 10.10.10.10
TIMEOUT
Pinging 10.10.10.10 with 32 bytes of data:
Request timed out.
Request timed out.
Executing Celery cmd: ping 10.10.10.10
TIMEOUT
Pinging 10.10.10.10 with 32 bytes of data:
Request timed out.
Request timed out.
Executing Celery cmd: ping 10.10.10.10
TIMEOUT
Pinging 10.10.10.10 with 32 bytes of data:
Request timed out.
Request timed out.
This block erros
Executing Celery cmd: ping 10.10.10.10
Executing Celery cmd: ping 10.10.10.10
Executing Celery cmd: ping 10.10.10.10
Exception in thread Thread-1:
Traceback (most recent call last):
  File "C:\Python27\lib\threading.py", line 810, in __bootstrap_inner
    self.run()
  File "C:\Python27\lib\threading.py", line 763, in run
    self.__target(*self.__args, **self.__kwargs)
  File "C:/Users/mkr/Documents/work/New_RoD/testing/run.py", line 18, in run
    out = task.info['PROGRESS']
  File "C:\Python27\lib\site-packages\celery\result.py", line 356, in result
    return self._get_task_meta()['result']
  File "C:\Python27\lib\site-packages\celery\result.py", line 339, in _get_task_meta
    return self._maybe_set_cache(self.backend.get_task_meta(self.id))
  File "C:\Python27\lib\site-packages\celery\backends\base.py", line 292, in get_task_meta
    meta = self._get_task_meta_for(task_id)
AttributeError: 'DisabledBackend' object has no attribute '_get_task_meta_for'
Exception in thread Thread-2:
Traceback (most recent call last):
  File "C:\Python27\lib\threading.py", line 810, in __bootstrap_inner
    self.run()
  File "C:\Python27\lib\threading.py", line 763, in run
    self.__target(*self.__args, **self.__kwargs)
  File "C:/Users/mkr/Documents/work/New_RoD/testing/run.py", line 18, in run
    out = task.info['PROGRESS']
  File "C:\Python27\lib\site-packages\celery\result.py", line 356, in result
    return self._get_task_meta()['result']
  File "C:\Python27\lib\site-packages\celery\result.py", line 339, in _get_task_meta
    return self._maybe_set_cache(self.backend.get_task_meta(self.id))
  File "C:\Python27\lib\site-packages\celery\backends\base.py", line 292, in get_task_meta
    meta = self._get_task_meta_for(task_id)
AttributeError: 'DisabledBackend' object has no attribute '_get_task_meta_for'
Exception in thread Thread-3:
Traceback (most recent call last):
  File "C:\Python27\lib\threading.py", line 810, in __bootstrap_inner
    self.run()
  File "C:\Python27\lib\threading.py", line 763, in run
    self.__target(*self.__args, **self.__kwargs)
  File "C:/Users/mkr/Documents/work/New_RoD/testing/run.py", line 18, in run
    out = task.info['PROGRESS']
  File "C:\Python27\lib\site-packages\celery\result.py", line 356, in result
    return self._get_task_meta()['result']
  File "C:\Python27\lib\site-packages\celery\result.py", line 339, in _get_task_meta
    return self._maybe_set_cache(self.backend.get_task_meta(self.id))
  File "C:\Python27\lib\site-packages\celery\backends\base.py", line 292, in get_task_meta
    meta = self._get_task_meta_for(task_id)
AttributeError: 'DisabledBackend' object has no attribute '_get_task_meta_for'
Process finished with exit code 0
Using app.AsyncResult worked for me, as suggested by https://stackoverflow.com/users/2682417/mylari in one of the comments above:
celery1 = Celery('mytasks', backend='redis://localhost:6379/1', broker='redis://localhost:6379/0')

def t_status(id):
    c = celery1.AsyncResult(id)
    return c
Calling method:
@app.route("/tasks/<task_id>", methods=["GET"])
def get_status(task_id):
    task_result = t_status(task_id)
    result = {
        "task_id": task_id,
        "task_status": task_result.status,
        "task_result": task_result.result
    }
    return jsonify(result), 200
Celery operations are not thread-safe - you probably want to wrap the call to task.info in a lock.
Also, mixing celery and threads like that is a little odd.
Try this:
from celery.result import AsyncResult
from iota_celery.app_iota import app as celery_app
AsyncResult(x, app=celery_app).revoke(terminate=True, signal='SIGKILL')
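And a minimal sketch of the locking suggestion above (the lock and the helper name are mine, not from the original answer): serialize reads of the task meta so only one thread touches the result backend at a time, and bind the configured app explicitly so a thread never falls back to a default app whose backend is disabled.
import threading
from celery.result import AsyncResult

info_lock = threading.Lock()

def read_progress(task_id, celery_app):
    # Explicit app avoids the DisabledBackend fallback in worker threads.
    task = AsyncResult(task_id, app=celery_app)
    with info_lock:  # one backend query at a time across threads
        return task.info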
celery.AsyncResult works for me too:
celery = make_celery(flask_app)
task_result = celery.AsyncResult(task_id)
