ConnectionResetError kombu - python

We have a producer-consumer model on RabbitMQ, and for some time now I have been getting this error:
File "newrelic/api/background_task.py", line 117, in wrapper
return wrapped(*args, **kwargs)
File "crs_consumer.py", line 49, in process_message
message.ack()
File "kombu/message.py", line 123, in ack
self.channel.basic_ack(self.delivery_tag, multiple=multiple)
File "amqp/channel.py", line 1407, in basic_ack
return self.send_method(
File "amqp/abstract_channel.py", line 70, in send_method
conn.frame_writer(1, self.channel_id, sig, args, content)
File "amqp/method_framing.py", line 186, in write_frame
write(buffer_store.view[:offset])
File "amqp/transport.py", line 347, in write
self._write(s)
What could be the reason behind this error?
python==3.9 kombu==5.0.2
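Since the reset happens inside message.ack(), one hedged way to keep the consumer alive is to treat the ack itself as fallible and to keep heartbeats flowing while the consumer is idle; the broker redelivers anything left unacked after a reconnect. A minimal sketch, assuming a RabbitMQ broker on localhost and a placeholder queue name (process_message mirrors the handler in the question):
import socket

from kombu import Connection, Queue

queue = Queue('crs_tasks')  # placeholder queue name, not from the question

def process_message(body, message):
    try:
        message.ack()
    except OSError as exc:  # covers ConnectionResetError
        # The socket died between delivery and ack; the broker will
        # redeliver the unacked message once we reconnect.
        print('ack failed, message will be redelivered: %r' % exc)

with Connection('amqp://guest:guest@localhost//', heartbeat=60) as conn:
    with conn.Consumer(queue, callbacks=[process_message]):
        while True:
            try:
                conn.drain_events(timeout=30)
            except socket.timeout:
                conn.heartbeat_check()  # keep the broker heartbeat alive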

Related

django.db.utils.DatabaseError: DatabaseWrapper objects created in a thread can only be used in that same thread - with celery beat and dbbackup

I am writing code to take frequent backups of our database to Dropbox. The following is my tasks.py; it has test code that executes the Django management command management.call_command('dbbackup', interactive=False) every minute. But I am encountering the error django.db.utils.DatabaseError: DatabaseWrapper objects created in a thread can only be used in that same thread. The object with alias 'default' was created in thread id 140423029811616 and this is thread id 140422957233632.
tasks.py
from __future__ import absolute_import, unicode_literals
from myproject.celery import app
from celery.utils.log import get_task_logger
from celery.schedules import crontab
from django.core import management

@app.task
def db_backup_to_dropbox():
    management.call_command('dbbackup', interactive=False)

app.conf.beat_schedule = {
    'backup-every-second': {
        'task': 'core.tasks.db_backup_to_dropbox',
        'schedule': crontab(minute='*/1'),
        'args': (),
    },
}
Traceback from celery.log
[2020-07-25 16:24:00,050: INFO/MainProcess] Received task: core.tasks.db_backup_to_dropbox[a34653fe-a3c6-4259-a98d-7613c5f3446c]
[2020-07-25 16:24:00,054: INFO/MainProcess] Backing Up Database: myproject
[2020-07-25 16:24:00,213: INFO/MainProcess] Writing file to default-myserver-loc1-2020-07-25-162400.dump
[2020-07-25 16:24:00,215: INFO/MainProcess] Request to files/get_metadata
[2020-07-25 16:24:00,287: ERROR/MainProcess] RecursionError: maximum recursion depth exceeded while calling a Python object
File "/myproject/venv/lib/python3.8/site-packages/dbbackup/utils.py", line 118, in wrapper
func(*args, **kwargs)
File "/myproject/venv/lib/python3.8/site-packages/dbbackup/management/commands/dbbackup.py", line 61, in handle
self._save_new_backup(database)
File "/myproject/venv/lib/python3.8/site-packages/dbbackup/management/commands/dbbackup.py", line 88, in _save_new_backup
self.write_to_storage(outputfile, filename)
File "/myproject/venv/lib/python3.8/site-packages/dbbackup/management/commands/_base.py", line 88, in write_to_storage
self.storage.write_file(file, path)
File "/myproject/venv/lib/python3.8/site-packages/dbbackup/storage.py", line 82, in write_file
self.storage.save(name=filename, content=filehandle)
File "/myproject/venv/lib/python3.8/site-packages/django/core/files/storage.py", line 51, in save
name = self.get_available_name(name, max_length=max_length)
File "/myproject/venv/lib/python3.8/site-packages/django/core/files/storage.py", line 82, in get_available_name
while self.exists(name) or (max_length and len(name) > max_length):
File "/myproject/venv/lib/python3.8/site-packages/storages/backends/dropbox.py", line 93, in exists
return bool(self.client.files_get_metadata(self._full_path(name)))
File "/myproject/venv/lib/python3.8/site-packages/dropbox/base.py", line 1682, in files_get_metadata
r = self.request(
File "/myproject/venv/lib/python3.8/site-packages/dropbox/dropbox.py", line 313, in request
res = self.request_json_string_with_retry(host,
File "/myproject/venv/lib/python3.8/site-packages/dropbox/dropbox.py", line 463, in request_json_string_with_retry
return self.request_json_string(host,
File "/myproject/venv/lib/python3.8/site-packages/dropbox/dropbox.py", line 559, in request_json_string
r = self._session.post(url,
File "/myproject/venv/lib/python3.8/site-packages/requests/sessions.py", line 578, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "/myproject/venv/lib/python3.8/site-packages/requests/sessions.py", line 530, in request
resp = self.send(prep, **send_kwargs)
File "/myproject/venv/lib/python3.8/site-packages/requests/sessions.py", line 643, in send
r = adapter.send(request, **kwargs)
File "/myproject/venv/lib/python3.8/site-packages/requests/adapters.py", line 439, in send
resp = conn.urlopen(
File "/myproject/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 670, in urlopen httplib_response = self._make_request(
File "/myproject/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 381, in _make_request
self._validate_conn(conn)
File "/myproject/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 978, in _validate_conn
conn.connect()
File "/myproject/venv/lib/python3.8/site-packages/urllib3/connection.py", line 343, in connect
self.ssl_context = create_urllib3_context(
File "/myproject/venv/lib/python3.8/site-packages/urllib3/util/ssl_.py", line 277, in create_urllib3_context
context.options |= options
File "/usr/lib/python3.8/ssl.py", line 602, in options
super(SSLContext, SSLContext).options.__set__(self, value)
File "/usr/lib/python3.8/ssl.py", line 602, in options
super(SSLContext, SSLContext).options.__set__(self, value)
File "/usr/lib/python3.8/ssl.py", line 602, in options
super(SSLContext, SSLContext).options.__set__(self, value)
[Previous line repeated 460 more times]
[2020-07-25 16:24:00,325: ERROR/MainProcess] Task core.tasks.db_backup_to_dropbox[a34653fe-a3c6-4259-a98d-7613c5f3446c] raised unexpected: DatabaseError("DatabaseWrapper objects created in a thread can only be used in that same thread. The object with alias 'default' was created in thread id 140423029811616 and this is thread id 140422957233632.")
Traceback (most recent call last):
File "/myproject/venv/lib/python3.8/site-packages/dbbackup/utils.py", line 118, in wrapper
func(*args, **kwargs)
File "/myproject/venv/lib/python3.8/site-packages/dbbackup/management/commands/dbbackup.py", line 61, in handle
self._save_new_backup(database)
File "/myproject/venv/lib/python3.8/site-packages/dbbackup/management/commands/dbbackup.py", line 88, in _save_new_backup
self.write_to_storage(outputfile, filename)
File "/myproject/venv/lib/python3.8/site-packages/dbbackup/management/commands/_base.py", line 88, in write_to_storage
self.storage.write_file(file, path)
File "/myproject/venv/lib/python3.8/site-packages/dbbackup/storage.py", line 82, in write_file
self.storage.save(name=filename, content=filehandle)
File "/myproject/venv/lib/python3.8/site-packages/django/core/files/storage.py", line 51, in save
name = self.get_available_name(name, max_length=max_length)
File "/myproject/venv/lib/python3.8/site-packages/django/core/files/storage.py", line 82, in get_available_name
while self.exists(name) or (max_length and len(name) > max_length):
File "/myproject/venv/lib/python3.8/site-packages/storages/backends/dropbox.py", line 93, in exists
return bool(self.client.files_get_metadata(self._full_path(name)))
File "/myproject/venv/lib/python3.8/site-packages/dropbox/base.py", line 1682, in files_get_metadata
r = self.request(
File "/myproject/venv/lib/python3.8/site-packages/dropbox/dropbox.py", line 313, in request
res = self.request_json_string_with_retry(host,
File "/myproject/venv/lib/python3.8/site-packages/dropbox/dropbox.py", line 463, in request_json_string_with_retry
return self.request_json_string(host,
File "/myproject/venv/lib/python3.8/site-packages/dropbox/dropbox.py", line 559, in request_json_string
r = self._session.post(url,
File "/myproject/venv/lib/python3.8/site-packages/requests/sessions.py", line 578, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "/myproject/venv/lib/python3.8/site-packages/requests/sessions.py", line 530, in request
resp = self.send(prep, **send_kwargs)
File "/myproject/venv/lib/python3.8/site-packages/requests/sessions.py", line 643, in send
r = adapter.send(request, **kwargs)
File "/myproject/venv/lib/python3.8/site-packages/requests/adapters.py", line 439, in send
resp = conn.urlopen(
File "/myproject/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 670, in urlopen httplib_response = self._make_request(
File "/myproject/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 381, in _make_request
self._validate_conn(conn)
File "/myproject/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 978, in _validate_conn
conn.connect()
File "/myproject/venv/lib/python3.8/site-packages/urllib3/connection.py", line 343, in connect
self.ssl_context = create_urllib3_context(
File "/myproject/venv/lib/python3.8/site-packages/urllib3/util/ssl_.py", line 277, in create_urllib3_context
context.options |= options
File "/usr/lib/python3.8/ssl.py", line 602, in options
super(SSLContext, SSLContext).options.__set__(self, value)
File "/usr/lib/python3.8/ssl.py", line 602, in options
super(SSLContext, SSLContext).options.__set__(self, value)
File "/usr/lib/python3.8/ssl.py", line 602, in options
super(SSLContext, SSLContext).options.__set__(self, value)
[Previous line repeated 460 more times]
RecursionError: maximum recursion depth exceeded while calling a Python object
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/myproject/venv/lib/python3.8/site-packages/celery/app/trace.py", line 412, in trace_task
R = retval = fun(*args, **kwargs)
File "/myproject/venv/lib/python3.8/site-packages/celery/app/trace.py", line 704, in __protected_call__
return self.run(*args, **kwargs)
File "/myproject/myproject/core/tasks.py", line 11, in db_backup_to_dropbox
management.call_command('dbbackup', interactive=False)
File "/myproject/venv/lib/python3.8/site-packages/django/core/management/__init__.py", line 168, in call_command
return command.execute(*args, **defaults)
File "/myproject/venv/lib/python3.8/site-packages/django/core/management/base.py", line 369, in execute
output = self.handle(*args, **options)
File "/myproject/venv/lib/python3.8/site-packages/dbbackup/utils.py", line 127, in wrapper
connection.close()
File "/myproject/venv/lib/python3.8/site-packages/django/utils/asyncio.py", line 26, in inner
return func(*args, **kwargs)
File "/myproject/venv/lib/python3.8/site-packages/django/db/backends/base/base.py", line 286, in close
self.validate_thread_sharing()
File "/myproject/venv/lib/python3.8/site-packages/django/db/backends/base/base.py", line 553, in validate_thread_sharing
raise DatabaseError(
django.db.utils.DatabaseError: DatabaseWrapper objects created in a thread can only be used in that same thread. The object with alias 'default' was created in thread id 140423029811616 and this is thread id 140422957233632.
Try adding this line to settings.py:
CELERY_TASK_ALWAYS_EAGER = True
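For context on what that does: always-eager makes .delay() calls execute the task inline in the calling process instead of sending it to a worker, which sidesteps the cross-thread DatabaseWrapper check but also disables real queueing, so treat it as a workaround rather than a root-cause fix. A hedged sketch of where it goes:
# settings.py (or wherever your Celery configuration lives)
# Assumption: with eager mode on, db_backup_to_dropbox() runs
# synchronously in the caller's thread, so the connection that
# dbbackup closes was created in that same thread.
CELERY_TASK_ALWAYS_EAGER = True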

UNEXPECTED_FRAME - expected content header for class 60, got non content header frame instead

Imagine that you have several workflows that need to execute. These workflows have tasks, and the targets of the tasks are different hosts.
The fastest way to do this is to run every workflow inside its own process, and run them all in parallel.
I am using Python multiprocessing to execute a remote function that I call with the help of Celery. My program runs fine if I run just one process, but when I run more than one I get the error below. As far as I can tell, the issue is concurrent publishing on the same channel: channels should not be shared between threads/processes.
How can I make Celery resolve this? Is it a parameter I should pass to the 'celeryd' command, or do I need to do it in my Python program?
Process Process-1:
Traceback (most recent call last):
File "/usr/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/lib/python2.7/multiprocessing/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "testHello.py", line 16, in test_hello_aux
print output.get()
File "/usr/local/lib/python2.7/dist-packages/celery/result.py", line 169, in get
no_ack=no_ack,
File "/usr/local/lib/python2.7/dist-packages/celery/backends/amqp.py", line 155, in wait_for
on_interval=on_interval)
File "/usr/local/lib/python2.7/dist-packages/celery/backends/amqp.py", line 229, in consume
no_ack=no_ack, accept=self.accept) as consumer:
File "/usr/local/lib/python2.7/dist-packages/kombu/messaging.py", line 359, in __init__
self.revive(self.channel)
File "/usr/local/lib/python2.7/dist-packages/kombu/messaging.py", line 371, in revive
self.declare()
File "/usr/local/lib/python2.7/dist-packages/kombu/messaging.py", line 381, in declare
queue.declare()
File "/usr/local/lib/python2.7/dist-packages/kombu/entity.py", line 505, in declare
self.queue_declare(nowait, passive=False)
File "/usr/local/lib/python2.7/dist-packages/kombu/entity.py", line 531, in queue_declare
nowait=nowait)
File "/usr/local/lib/python2.7/dist-packages/amqp/channel.py", line 1254, in queue_declare
self._send_method((50, 10), args)
File "/usr/local/lib/python2.7/dist-packages/amqp/abstract_channel.py", line 56, in _send_method
self.channel_id, method_sig, args, content,
File "/usr/local/lib/python2.7/dist-packages/amqp/method_framing.py", line 221, in write_method
write_frame(1, channel, payload)
File "/usr/local/lib/python2.7/dist-packages/amqp/transport.py", line 177, in write_frame
frame_type, channel, size, payload, 0xce,
File "/usr/lib/python2.7/socket.py", line 224, in meth
return getattr(self._sock,name)(*args)
error: [Errno 32] Broken pipe
Process Process-2:
Traceback (most recent call last):
File "/usr/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/lib/python2.7/multiprocessing/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "testHello.py", line 16, in test_hello_aux
print output.get()
File "/usr/local/lib/python2.7/dist-packages/celery/result.py", line 169, in get
no_ack=no_ack,
File "/usr/local/lib/python2.7/dist-packages/celery/backends/amqp.py", line 155, in wait_for
on_interval=on_interval)
File "/usr/local/lib/python2.7/dist-packages/celery/backends/amqp.py", line 229, in consume
no_ack=no_ack, accept=self.accept) as consumer:
File "/usr/local/lib/python2.7/dist-packages/kombu/messaging.py", line 359, in __init__
Process Process-3:
self.revive(self.channel)
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/kombu/messaging.py", line 371, in revive
self.declare()
File "/usr/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
File "/usr/local/lib/python2.7/dist-packages/kombu/messaging.py", line 381, in declare
queue.declare()
File "/usr/local/lib/python2.7/dist-packages/kombu/entity.py", line 504, in declare
self.run()
self.exchange.declare(nowait)
File "/usr/lib/python2.7/multiprocessing/process.py", line 114, in run
File "/usr/local/lib/python2.7/dist-packages/kombu/entity.py", line 166, in declare
self._target(*self._args, **self._kwargs)
nowait=nowait, passive=passive,
File "testHello.py", line 16, in test_hello_aux
File "/usr/local/lib/python2.7/dist-packages/amqp/channel.py", line 613, in exchange_declare
print output.get()
File "/usr/local/lib/python2.7/dist-packages/celery/result.py", line 169, in get
no_ack=no_ack,
File "/usr/local/lib/python2.7/dist-packages/celery/backends/amqp.py", line 155, in wait_for
on_interval=on_interval)
File "/usr/local/lib/python2.7/dist-packages/celery/backends/amqp.py", line 229, in consume
no_ack=no_ack, accept=self.accept) as consumer:
File "/usr/local/lib/python2.7/dist-packages/kombu/messaging.py", line 359, in __init__
self._send_method((40, 10), args)
File "/usr/local/lib/python2.7/dist-packages/amqp/abstract_channel.py", line 56, in _send_method
self.channel_id, method_sig, args, content,
File "/usr/local/lib/python2.7/dist-packages/amqp/method_framing.py", line 221, in write_method
self.revive(self.channel)
File "/usr/local/lib/python2.7/dist-packages/kombu/messaging.py", line 371, in revive
self.declare()
File "/usr/local/lib/python2.7/dist-packages/kombu/messaging.py", line 381, in declare
write_frame(1, channel, payload)
queue.declare()
File "/usr/local/lib/python2.7/dist-packages/amqp/transport.py", line 177, in write_frame
File "/usr/local/lib/python2.7/dist-packages/kombu/entity.py", line 504, in declare
frame_type, channel, size, payload, 0xce,
File "/usr/lib/python2.7/socket.py", line 224, in meth
self.exchange.declare(nowait)
File "/usr/local/lib/python2.7/dist-packages/kombu/entity.py", line 166, in declare
nowait=nowait, passive=passive,
File "/usr/local/lib/python2.7/dist-packages/amqp/channel.py", line 620, in exchange_declare
return getattr(self._sock,name)(*args)
error: [Errno 32] Broken pipe
(40, 11), # Channel.exchange_declare_ok
File "/usr/local/lib/python2.7/dist-packages/amqp/abstract_channel.py", line 67, in wait
self.channel_id, allowed_methods)
File "/usr/local/lib/python2.7/dist-packages/amqp/connection.py", line 237, in _wait_method
self.method_reader.read_method()
File "/usr/local/lib/python2.7/dist-packages/amqp/method_framing.py", line 189, in read_method
raise m
error: [Errno 104] Connection reset by peer
Process Process-4:
Traceback (most recent call last):
File "/usr/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/lib/python2.7/multiprocessing/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "testHello.py", line 16, in test_hello_aux
print output.get()
File "/usr/local/lib/python2.7/dist-packages/celery/result.py", line 169, in get
no_ack=no_ack,
File "/usr/local/lib/python2.7/dist-packages/celery/backends/amqp.py", line 155, in wait_for
on_interval=on_interval)
File "/usr/local/lib/python2.7/dist-packages/celery/backends/amqp.py", line 229, in consume
no_ack=no_ack, accept=self.accept) as consumer:
File "/usr/local/lib/python2.7/dist-packages/kombu/messaging.py", line 359, in __init__
self.revive(self.channel)
File "/usr/local/lib/python2.7/dist-packages/kombu/messaging.py", line 371, in revive
self.declare()
File "/usr/local/lib/python2.7/dist-packages/kombu/messaging.py", line 381, in declare
queue.declare()
File "/usr/local/lib/python2.7/dist-packages/kombu/entity.py", line 505, in declare
self.queue_declare(nowait, passive=False)
File "/usr/local/lib/python2.7/dist-packages/kombu/entity.py", line 531, in queue_declare
nowait=nowait)
File "/usr/local/lib/python2.7/dist-packages/amqp/channel.py", line 1258, in queue_declare
(50, 11), # Channel.queue_declare_ok
File "/usr/local/lib/python2.7/dist-packages/amqp/abstract_channel.py", line 67, in wait
self.channel_id, allowed_methods)
File "/usr/local/lib/python2.7/dist-packages/amqp/connection.py", line 270, in _wait_method
self.wait()
File "/usr/local/lib/python2.7/dist-packages/amqp/abstract_channel.py", line 69, in wait
return self.dispatch_method(method_sig, args, content)
File "/usr/local/lib/python2.7/dist-packages/amqp/abstract_channel.py", line 87, in dispatch_method
return amqp_method(self, args)
File "/usr/local/lib/python2.7/dist-packages/amqp/connection.py", line 526, in _close
(class_id, method_id), ConnectionError)
UnexpectedFrame: Basic.publish: (505) UNEXPECTED_FRAME - expected content header for class 60, got non content header frame instead
celery --version 3.1.11 (Cipater)
amq --version 0.9.1
When using Celery you should not need to use the Python multiprocessing module; Celery takes care of everything for you.
Define your task in a file called tasks.py
from celery import Celery

app = Celery('tasks', broker='amqp://guest@localhost//')

@app.task
def add(x, y):
    return x + y
Now assume the add function is actually whatever you would like to run in parallel. Let's also consider terms: parallel means at the same time, while async means not synchronously. I cannot guarantee your tasks will run at the same time, though I can guarantee they will not run synchronously. For that reason, let's stick with the term async.
Celery has Canvas, a set of primitives for async flow control. The two you would be interested in are group and chord. group allows you to run a group of async tasks and block on the results of all of them (accomplishing what you were attempting with your join). chord provides the same functionality as group, but fires a callback when all of the tasks complete.
An example of the calling code:
WAIT_TIME = 10 # how ever long you are willing to wait for your tasks
from tasks import add
from celery import group
future = group(add.s(i**i, i**i) for i in xrange(10))()
results = future.get(timeout=WAIT_TIME)
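chord works the same way, except that the callback receives the list of results. A minimal sketch under the same assumptions; tsum is a hypothetical callback task that is not defined in the question:
WAIT_TIME = 10  # seconds, same budget as the group example

from celery import chord
# tsum would be a task defined next to add() in tasks.py, e.g.:
#   @app.task
#   def tsum(numbers):
#       return sum(numbers)
from tasks import add, tsum

# The header group runs async; tsum fires once every add() completes.
result = chord(add.s(i, i) for i in range(10))(tsum.s())
print(result.get(timeout=WAIT_TIME))  # 2 * (0 + 1 + ... + 9) == 90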
Celery tasks are automatically run in their own process (the workers you spawn) and do not require you to create further processes yourself.

Celery creating a new connection for each task

I'm using Celery with Redis to run some background tasks, but each time a task is called, it creates a new connection to Redis. I'm on Heroku and my Redis to Go plan allows for 10 connections. I'm quickly hitting that limit and getting a "max number of clients reached" error.
How can I ensure that Celery queues the tasks on a single connection rather than opening a new one each time?
EDIT - including the full traceback
File "/app/.heroku/venv/lib/python2.7/site-packages/django/core/handlers/base.py", line 111, in get_response
response = callback(request, *callback_args, **callback_kwargs)
File "/app/.heroku/venv/lib/python2.7/site-packages/newrelic-1.4.0.137/newrelic/api/object_wrapper.py", line 166, in __call__
self._nr_instance, args, kwargs)
File "/app/.heroku/venv/lib/python2.7/site-packages/newrelic-1.4.0.137/newrelic/hooks/framework_django.py", line 447, in wrapper
return wrapped(*args, **kwargs)
File "/app/.heroku/venv/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 77, in wrapped_view
return view_func(*args, **kwargs)
File "/app/feedback/views.py", line 264, in zencoder_webhook_handler
tasks.process_zencoder_notification.delay(webhook)
File "/app/.heroku/venv/lib/python2.7/site-packages/celery/app/task.py", line 343, in delay
return self.apply_async(args, kwargs)
File "/app/.heroku/venv/lib/python2.7/site-packages/celery/app/task.py", line 458, in apply_async
with app.producer_or_acquire(producer) as P:
File "/usr/local/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/app/.heroku/venv/lib/python2.7/site-packages/celery/app/base.py", line 247, in producer_or_acquire
with self.amqp.producer_pool.acquire(block=True) as producer:
File "/app/.heroku/venv/lib/python2.7/site-packages/kombu/connection.py", line 705, in acquire
R = self.prepare(R)
File "/app/.heroku/venv/lib/python2.7/site-packages/kombu/pools.py", line 54, in prepare
p = p()
File "/app/.heroku/venv/lib/python2.7/site-packages/kombu/pools.py", line 45, in <lambda>
return lambda: self.create_producer()
File "/app/.heroku/venv/lib/python2.7/site-packages/kombu/pools.py", line 42, in create_producer
return self.Producer(self._acquire_connection())
File "/app/.heroku/venv/lib/python2.7/site-packages/celery/app/amqp.py", line 160, in __init__
super(TaskProducer, self).__init__(channel, exchange, *args, **kwargs)
File "/app/.heroku/venv/lib/python2.7/site-packages/kombu/messaging.py", line 83, in __init__
self.revive(self.channel)
File "/app/.heroku/venv/lib/python2.7/site-packages/kombu/messaging.py", line 174, in revive
channel = self.channel = maybe_channel(channel)
File "/app/.heroku/venv/lib/python2.7/site-packages/kombu/connection.py", line 879, in maybe_channel
return channel.default_channel
File "/app/.heroku/venv/lib/python2.7/site-packages/kombu/connection.py", line 617, in default_channel
self.connection
File "/app/.heroku/venv/lib/python2.7/site-packages/kombu/connection.py", line 610, in connection
self._connection = self._establish_connection()
File "/app/.heroku/venv/lib/python2.7/site-packages/kombu/connection.py", line 569, in _establish_connection
conn = self.transport.establish_connection()
File "/app/.heroku/venv/lib/python2.7/site-packages/kombu/transport/virtual/__init__.py", line 722, in establish_connection
self._avail_channels.append(self.create_channel(self))
File "/app/.heroku/venv/lib/python2.7/site-packages/kombu/transport/virtual/__init__.py", line 705, in create_channel
channel = self.Channel(connection)
File "/app/.heroku/venv/lib/python2.7/site-packages/kombu/transport/redis.py", line 271, in __init__
self.client.info()
File "/app/.heroku/venv/lib/python2.7/site-packages/newrelic-1.4.0.137/newrelic/api/object_wrapper.py", line 166, in __call__
self._nr_instance, args, kwargs)
File "/app/.heroku/venv/lib/python2.7/site-packages/newrelic-1.4.0.137/newrelic/api/function_trace.py", line 81, in literal_wrapper
return wrapped(*args, **kwargs)
File "/app/.heroku/venv/lib/python2.7/site-packages/redis/client.py", line 344, in info
return self.execute_command('INFO')
File "/app/.heroku/venv/lib/python2.7/site-packages/kombu/transport/redis.py", line 536, in execute_command
conn.send_command(*args)
File "/app/.heroku/venv/lib/python2.7/site-packages/redis/connection.py", line 273, in send_command
self.send_packed_command(self.pack_command(*args))
File "/app/.heroku/venv/lib/python2.7/site-packages/redis/connection.py", line 256, in send_packed_command
self.connect()
File "/app/.heroku/venv/lib/python2.7/site-packages/newrelic-1.4.0.137/newrelic/api/object_wrapper.py", line 166, in __call__
self._nr_instance, args, kwargs)
File "/app/.heroku/venv/lib/python2.7/site-packages/newrelic-1.4.0.137/newrelic/api/function_trace.py", line 81, in literal_wrapper
return wrapped(*args, **kwargs)
File "/app/.heroku/venv/lib/python2.7/site-packages/redis/connection.py", line 207, in connect
self.on_connect()
File "/app/.heroku/venv/lib/python2.7/site-packages/redis/connection.py", line 233, in on_connect
if self.read_response() != 'OK':
File "/app/.heroku/venv/lib/python2.7/site-packages/redis/connection.py", line 283, in read_response
raise response
ResponseError: max number of clients reached
I ran into the same problem on Heroku with CloudAMQP. I do not know why, but I had no luck when assigning low integers to the BROKER_POOL_LIMIT setting.
Ultimately, I found that setting BROKER_POOL_LIMIT=None or BROKER_POOL_LIMIT=0 mitigated my issue. According to the Celery docs, this disables the connection pool. So far this has not been a noticeable issue for me; however, I'm not sure whether it will be for you.
Link to relevant info: http://celery.readthedocs.org/en/latest/configuration.html#broker-pool-limit
I wish I was using Redis, because there is a specific option to limit the number of connections: CELERY_REDIS_MAX_CONNECTIONS.
http://docs.celeryproject.org/en/3.0/configuration.html#celery-redis-max-connections (for 3.0)
http://docs.celeryproject.org/en/latest/configuration.html#celery-redis-max-connections (for 3.1)
http://docs.celeryproject.org/en/master/configuration.html#celery-redis-max-connections (for dev)
MongoDB has a similar backend setting.
Given these backend settings, I have no idea what BROKER_POOL_LIMIT actually does. Hopefully CELERY_REDIS_MAX_CONNECTIONS solves your problem.
I'm one of those folks using CloudAMQP, and the AMQP backend does not have its own connection limit parameter.
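Collecting the knobs from the answers above in one place, a hedged sketch (the values are illustrative, not recommendations):
# Celery configuration -- the options discussed in the answers above.
BROKER_POOL_LIMIT = 0  # 0 or None disables the broker connection pool
CELERY_REDIS_MAX_CONNECTIONS = 8  # per the linked docs, caps the Redis result-backend pool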
Try these settings:
CELERY_IGNORE_RESULT = True
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
I had a similar issue involving number of connections and Celery. It wasn't on Heroku, and it was Mongo and not Redis though.
I initiated the connection outside of the task function definition at the task module level. At least for Mongo this allowed the tasks to share the connection.
Hope that helps.
https://github.com/instituteofdesign/wander/blob/master/wander/tasks.py
mongoengine.connect('stored_messages')

@celery.task(default_retry_delay = 61)
def pull(settings, google_settings, user, folder, messageid):
    '''
    Pulls a message from zimbra and stores it in Mongo
    '''
    try:
        imap = imap_connect(settings, user)
        imap.select(folder, True)
        .......

ArgumentError: An error occurred parsing (locally or remotely) the arguments to datastore_v3.Put()

My app started throwing this error last night. Users are complaining that the site is slow as well. We are still on Python 2.5. As far as I can tell, the request itself is not unusual.
Any ideas?
Traceback (most recent call last):
File "/base/python_runtime/python_lib/versions/1/google/appengine/ext/webapp/_webapp25.py", line 703, in __call__
handler.post(*groups)
File "/base/data/home/apps/s~jupitersfolly/4.357517623694016771/controller/frequest.py", line 57, in post
request_types[request_type]()
File "/base/data/home/apps/s~jupitersfolly/4.357517623694016771/controller/frequest.py", line 78, in order
event, report, messages = Game.update(game_number, user_id, order)
File "/base/data/home/apps/s~jupitersfolly/4.357517623694016771/model/game.py", line 355, in update
event, report, game, universe, messages, updated, ended = db.run_in_transaction(Game.transactional_update, key_id, user_id, order)
File "/base/python_runtime/python_lib/versions/1/google/appengine/api/datastore.py", line 2433, in RunInTransaction
return RunInTransactionOptions(None, function, *args, **kwargs)
File "/base/python_runtime/python_lib/versions/1/google/appengine/api/datastore.py", line 2571, in RunInTransactionOptions
ok, result = _DoOneTry(new_connection, function, args, kwargs)
File "/base/python_runtime/python_lib/versions/1/google/appengine/api/datastore.py", line 2593, in _DoOneTry
result = function(*args, **kwargs)
File "/base/data/home/apps/s~jupitersfolly/4.357517623694016771/model/game.py", line 348, in transactional_update
game.put()
File "/base/python_runtime/python_lib/versions/1/google/appengine/ext/db/__init__.py", line 1074, in put
return datastore.Put(self._entity, **kwargs)
File "/base/python_runtime/python_lib/versions/1/google/appengine/api/datastore.py", line 579, in Put
return PutAsync(entities, **kwargs).get_result()
File "/base/python_runtime/python_lib/versions/1/google/appengine/api/apiproxy_stub_map.py", line 604, in get_result
return self.__get_result_hook(self)
File "/base/python_runtime/python_lib/versions/1/google/appengine/datastore/datastore_rpc.py", line 1577, in __put_hook
self.check_rpc_success(rpc)
File "/base/python_runtime/python_lib/versions/1/google/appengine/datastore/datastore_rpc.py", line 1212, in check_rpc_success
rpc.check_success()
File "/base/python_runtime/python_lib/versions/1/google/appengine/api/apiproxy_stub_map.py", line 570, in check_success
self.__rpc.CheckSuccess()
File "/base/python_runtime/python_lib/versions/1/google/appengine/api/apiproxy_rpc.py", line 133, in CheckSuccess
raise self.exception
ArgumentError: An error occurred parsing (locally or remotely) the arguments to datastore_v3.Put()

Twisted Web Proxy Help!

I wrote a Twisted Python HTTP proxy, and I keep getting the following traceback after navigating to a page through the proxy.
Traceback (most recent call last):
File "C:\ZBrownTechnology\Web Lock\Proxy.py", line 57, in <module>
reactor.run()
File "C:\Python26\lib\site-packages\twisted\internet\base.py", line 1165, in run
self.mainLoop()
File "C:\Python26\lib\site-packages\twisted\internet\base.py", line 1177, in mainLoop
self.doIteration(t)
File "C:\Python26\lib\site-packages\twisted\internet\selectreactor.py", line 140, in doSelect
_logrun(selectable, _drdw, selectable, method, dict)
--- <exception caught here> ---
File "C:\Python26\lib\site-packages\twisted\python\log.py", line 84, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "C:\Python26\lib\site-packages\twisted\python\log.py", line 69, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "C:\Python26\lib\site-packages\twisted\python\context.py", line 59, in ca
llWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "C:\Python26\lib\site-packages\twisted\python\context.py", line 37, in callWithContext
return func(*args,**kw)
File "C:\Python26\lib\site-packages\twisted\internet\selectreactor.py", line 156, in _doReadOrWrite
self._disconnectSelectable(selectable, why, method=="doRead")
File "C:\Python26\lib\site-packages\twisted\internet\posixbase.py", line 250,
in _disconnectSelectable
selectable.readConnectionLost(f)
File "C:\Python26\lib\site-packages\twisted\internet\tcp.py", line 508, in readConnectionLost
self.connectionLost(reason)
File "C:\Python26\lib\site-packages\twisted\internet\tcp.py", line 677, in connectionLost
Connection.connectionLost(self, reason)
File "C:\Python26\lib\site-packages\twisted\internet\tcp.py", line 519, in connectionLost
protocol.connectionLost(reason)
File "C:\Python26\lib\site-packages\twisted\web\http.py", line 489, in connectionLost
self.handleResponseEnd()
File "C:\Python26\lib\site-packages\twisted\web\proxy.py", line 88, in handleResponseEnd
self.father.finish()
File "C:\Python26\lib\site-packages\twisted\web\http.py", line 900, in finish
"Request.finish called on a request after its connection was lost; "
exceptions.RuntimeError: Request.finish called on a request after its connection was lost; use Request.notifyFinish to keep track of this.
What does this mean? How do I fix it? Is it a problem with the module, or a problem in my code? I am on Windows XP, using Python 2.6.
This is a known bug in twisted.web.proxy. It's typically harmless. If it's causing problems for you, please consider contributing a patch to fix it!
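If it is getting in your way in the meantime, the RuntimeError text itself suggests the shape of a workaround: use Request.notifyFinish to learn when the client connection has gone away, and skip the finish() call in that case. A hypothetical sketch of that pattern (not the actual upstream patch):
from twisted.web import proxy

class PatchedProxyClient(proxy.ProxyClient):
    """Hypothetical subclass: avoid calling father.finish() after the
    browser has already dropped its connection (the case above)."""

    def connectionMade(self):
        self._clientGone = False
        # notifyFinish() errbacks if the client connection is lost
        # before the response has been fully written.
        self.father.notifyFinish().addErrback(self._clientConnectionLost)
        proxy.ProxyClient.connectionMade(self)

    def _clientConnectionLost(self, reason):
        self._clientGone = True

    def handleResponseEnd(self):
        if self._clientGone:
            self.transport.loseConnection()  # still drop the server side
        else:
            proxy.ProxyClient.handleResponseEnd(self)
You would wire this in through a proxy.ProxyClientFactory subclass whose protocol attribute points at PatchedProxyClient.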
