I created a Celery task that should run every hour at minute 0, but it does not run. What am I doing wrong?
celery.py
from __future__ import absolute_import, unicode_literals
import os
import pytz
from celery import Celery
from datetime import datetime
from celery.schedules import crontab

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app.settings')

app = Celery('app', broker='amqp://rabbit:5672')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()

@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    sender.add_periodic_task(
        crontab(minute=0, hour='0,1,2,3,4,5,6,7,8,9,10,11,12,'
                               '13,14,15,16,17,18,19,20,21,22,23,24'),
        task.s())

@app.task
def task():
    # any code
    pass
In the terminal, I see this info, but the task is not running
[2018-05-09 19:05:16,275: INFO/Beat] beat: Starting...
Try changing your task code to something like this:
from celery.task import periodic_task
from celery.schedules import crontab

@periodic_task(
    run_every=crontab(minute='*/60'),
    name="task_name")
def run_some_task():
    '''some code'''
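On current Celery versions you can also declare the hourly schedule directly in the app configuration instead of a signal handler. A minimal sketch; the task name 'app.tasks.task' is an assumption, use the name shown under [tasks] when the worker starts:

from celery.schedules import crontab

# Sketch: schedule an already-registered task every hour at minute 0.
# 'app.tasks.task' is an assumed task name, adjust it to your project.
app.conf.beat_schedule = {
    'run-every-hour': {
        'task': 'app.tasks.task',
        'schedule': crontab(minute=0, hour='*'),
    },
}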
Related
While calling the .delay() method of an imported task from a Django application, the process gets stuck and the request never completes.
We also don't get any error on the console.
Setting up a set_trace() with pdb results in the same thing.
The following questions were reviewed, but they didn't help resolve the issue:
Calling celery task hangs for delay and apply_async
celery .delay hangs (recent, not an auth problem)
E.g.:
backend/settings.py
CELERY_BROKER_URL = os.environ.get("CELERY_BROKER", RABBIT_URL)
CELERY_RESULT_BACKEND = os.environ.get("CELERY_BROKER", RABBIT_URL)
backend/celery.py
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery

# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')

app = Celery('backend')
app.config_from_object('django.conf:settings', namespace='CELERY')

# Load task modules from all registered Django app configs.
app.autodiscover_tasks()

@app.task(bind=True)
def debug_task(self):
    print('Request: {0!r}'.format(self.request))
app/tasks.py
import time

from celery import shared_task

@shared_task
def upload_file(request_id):
    time.sleep(request_id)
    return True
app/views.py
from rest_framework.views import APIView

from .tasks import upload_file

class UploadCreateAPIView(APIView):
    # other methods...

    def post(self, request, *args, **kwargs):
        id = request.data.get("id", None)
        # business logic ...
        print("Going to submit task.")
        import pdb; pdb.set_trace()
        upload_file.delay(id)  # <- this hangs the runserver as well as the set_trace()
        print("Submitted task.")
The issue was with the setup of the celery application with Django. We need to make sure that the celery app is imported and initialized in the following file:
backend/__init__.py
from __future__ import absolute_import, unicode_literals
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
__all__ = ('celery_app',)
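As a quick sanity check (a sketch, assuming the project package is backend and the Django app named app is in INSTALLED_APPS), you can confirm from a Django shell that the shared task is now registered on the Celery app:

# Sketch: run inside "python manage.py shell" after adding backend/__init__.py.
from backend import celery_app

print('app.tasks.upload_file' in celery_app.tasks)  # expected to print True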
I've run into this issue where Celery calls through delay or apply_async may randomly hang the program indefinitely. I tried all the broker_transport_options and retry_policy options to let Celery recover, but it still happens. Then I found this solution, which enforces an execution time limit on a block/function by using the underlying Python signal handlers.
import signal
from contextlib import contextmanager

class TimeoutException(Exception):
    pass

@contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")
    signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)

def my_function():
    with time_limit(3):
        celery_call.apply_async(kwargs={"k1": "v1"}, expires=30)
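A hedged usage sketch of how a caller could react to the timeout; the retry-once behaviour and the submit_with_timeout name are illustrative, not from the original answer:

def submit_with_timeout():
    # Illustrative only: retry the publish once if the broker call hangs.
    try:
        with time_limit(3):
            celery_call.apply_async(kwargs={"k1": "v1"}, expires=30)
    except TimeoutException:
        # The broker connection appears stuck; try once more, or log and give up.
        with time_limit(3):
            celery_call.apply_async(kwargs={"k1": "v1"}, expires=30)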
I want to monitor/report task statuses, but tasks are saved only when all tasks are done. I want them to be saved as soon as they start.
Inside my Queue project:
celery.py
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Queue.settings')

app = Celery('Queue',
             broker='redis://localhost:6379',
             backend='django-db',
             task_track_started=True,
             include=['index.tasks'])
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
inside "index" app
tasks.py
from __future__ import absolute_import, unicode_literals
import time

from celery import shared_task, current_task

@shared_task
def gozle():
    time.sleep(15)
    return 1
views.py
from django.http import HttpResponse
from .tasks import gozle

def index(request):
    gozle.delay()
    return HttpResponse('admin')
I expect that, as I visit the index page, my task will be triggered and recorded in ResultTask right away, but it only gets recorded after the 15 seconds have passed.
The start of the task should be acknowledged immediately: you should see in your Celery server a log entry saying that the gozle task started, then it waits 15 seconds and exits, and this exit state will also appear in the Celery server log. If that is not the case, it may have something to do with task_acks_late being enabled in your Celery config (see the docs), which makes Celery acknowledge tasks only after they end.
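If the goal is to have the result recorded as soon as a worker picks the task up (a STARTED state), enabling task tracking is the usual knob. A minimal sketch, assuming Django settings with the CELERY_ namespace used above:

# settings.py -- sketch: report the STARTED state so the result backend
# records the task as soon as a worker starts executing it.
CELERY_TASK_TRACK_STARTED = True
# equivalently, directly on the app object:
# app.conf.task_track_started = True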
I'm trying to create a periodic task within a Django app.
I added this to my settings.py:
from datetime import timedelta

CELERYBEAT_SCHEDULE = {
    'get_checkins': {
        'task': 'api.tasks.get_checkins',
        'schedule': timedelta(seconds=1)
    }
}
I'm just getting started with Celery and haven't figured out which broker I want to use, so I added this as well to just bypass the broker for the time being:
if DEBUG:
    CELERY_ALWAYS_EAGER = True
I also created a celery.py file in my project folder:
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testproject.settings')
app = Celery('testproject')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
Inside my app, called api, I made a tasks.py file:
from celery import shared_task

@shared_task
def get_checkins():
    print('hello from get checkins')
I'm running the worker and beat with celery -A testproject worker --beat -l info
It starts up fine and I can see the task is registered under [tasks], but I don't see any jobs getting logged. There should be one per second. Can anyone tell me why this isn't executing?
I looked at your post and don't see any mention of the broker you are using along with Celery.
Have you installed a broker like RabbitMQ? Is it running, or is it logging some kind of error?
Celery needs a broker to send and receive data.
Check the documentation here (http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html#choosing-a-broker)
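For example, once a broker such as Redis or RabbitMQ is running, pointing Celery at it is a one-line setting. A sketch, assuming the CELERY_ settings namespace used in the celery.py above:

# settings.py -- sketch: point Celery at a locally running broker.
CELERY_BROKER_URL = 'redis://localhost:6379/0'  # if using Redis
# CELERY_BROKER_URL = 'amqp://guest:guest@localhost:5672//'  # if using RabbitMQ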
I have a Heroku worker dyno that is not printing or logging anything to the Heroku logs.
I set up the worker in my Procfile so that all logging.info() calls should work:
worker: celery -A tasks worker -B --loglevel=info
Here is the tasks.py file:
import datetime
import json
import logging

from celery import Celery
from celery.decorators import periodic_task
from celery.utils.log import get_task_logger
from mongoengine import DoesNotExist

import settings

logger = get_task_logger(__name__)

app = Celery('tasks',
             broker=settings.get('rabbitmq_bigwig_url'),
             backend='amqp')

@periodic_task(run_every=datetime.timedelta(minutes=1))
def test():
    print 'Not printing!'
    logging.info('Also not printing!')
How do I get print/logging messages to write to Heroku's logs? I've tried all the Heroku log commands (heroku logs, heroku logs --ps worker, etc.)
In the example you provided, you initialize logger = get_task_logger(__name__), but then when you mean to log something, you're using logging.info(..).
In your final line, replace logging. with logger..
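Applied to the task above, a minimal sketch of the corrected call:

@periodic_task(run_every=datetime.timedelta(minutes=1))
def test():
    # use the task logger created with get_task_logger(__name__) above
    logger.info('This should show up in the worker output')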
I have a flask app that roughly looks like this:
import json

from flask import Flask, request

app = Flask(__name__)

@app.route('/', methods=['POST'])
def foo():
    data = json.loads(request.data)
    # do some stuff
    return "OK"
In addition, I would like to run a function every ten seconds from that script; I don't want to use sleep for that. I have the following Celery script as well:
from datetime import timedelta

from celery import Celery

celery = Celery('__name__')

CELERYBEAT_SCHEDULE = {
    'add-every-30-seconds': {
        'task': 'tasks.add',
        'schedule': timedelta(seconds=10)
    },
}

@celery.task(name='tasks.add')
def hello():
    app.logger.info('run my function')
The script works fine, but the logger.info is not executed. What am I missing?
Do you have Celery worker and Celery beat running? Scheduled tasks are handled by beat, which queues the task mentioned when appropriate. Worker then actually crunches the numbers and executes your task.
celery worker --app myproject --loglevel=info
celery beat --app myproject
Your task however looks like it's calling the Flask app's logger. When using the worker, you probably don't have the Flask application around (since it's in another process). Try using a normal Python logger for the demo task.
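For instance, a minimal sketch using Celery's own task logger instead of app.logger:

from celery.utils.log import get_task_logger

logger = get_task_logger(__name__)

@celery.task(name='tasks.add')
def hello():
    # logged through the worker's logging setup, no Flask app required
    logger.info('run my function')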
Well, celery beat can be embedded in a regular celery worker as well, with the -B parameter in your command:
celery -A myproject worker --loglevel=info -B
It is only recommended for the development environment. For production, you should run beat and the celery workers separately, as the documentation mentions; otherwise your periodic task will run more than once.
A Celery task by default runs outside of the Flask application context and thus won't have access to the Flask app instance. However, it's very easy to push the Flask app context while running a task by using the app_context method of the Flask app object.
from flask import Flask
from celery import Celery

app = Flask(__name__)
celery = Celery(app.name)

@celery.task
def task():
    with app.app_context():
        app.logger.info('running my task')
This article by Miguel Grinberg is a very good place to get a primer on the basics of using Celery in a Flask application.
First install Redis on the machine and check that it is running. Then install the Python dependencies (see the pip command after this list):
celery
redis
flask
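For example, assuming pip is available:

pip install celery redis flask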
Folder structure:
project
    app
        __init__.py
        task.py
    main.py
Write task.py:
from celery import Celery
from celery.schedules import crontab
from celery.utils.log import get_task_logger

from app import app
from app.scrap import product_data

logger = get_task_logger(__name__)

def make_celery(app):
    # Celery configuration
    app.config['CELERY_BROKER_URL'] = 'redis://127.0.0.1:6379'
    app.config['CELERY_RESULT_BACKEND'] = 'db+postgresql://user:password@172.17.0.3:5432/mydatabase'
    app.config['CELERY_RESULT_EXTENDED'] = True
    app.config['CELERYBEAT_SCHEDULE'] = {
        # Executes every minute
        'periodic_task-every-minute': {
            'task': 'periodic_task',
            'schedule': crontab(minute="*")
        }
    }

    celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])
    celery.conf.update(app.config)

    TaskBase = celery.Task

    class ContextTask(TaskBase):
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    return celery

celery = make_celery(app)

@celery.task(name="periodic_task", bind=True)
def testing(self):
    file1 = open("../myfile.txt", "a")
    # writing newline character
    file1.write("\n")
    file1.write("Today")
    print("Running")
    self.request.task_name = "state"
    logger.info("Hello! from periodic task")
    return "Done"
Write __init__.py:
from flask import Flask, Blueprint,request
from flask_restx import Api,Resource,fields
from flask_sqlalchemy import SQLAlchemy
import redis
from rq import Queue
app = Flask(__name__)
app.config['SECRET_KEY']='7c09ebc8801a0ce8fb82b3d2ec51b4db'
app.config['SQLALCHEMY_DATABASE_URI']='sqlite:///site.db'
db=SQLAlchemy(app)
Commands to run Celery beat and the worker:
celery -A app.task.celery beat
celery -A app.task.celery worker --loglevel=info