I am using the Python Docker SDK to create multiple Chrome containers. Below is my script.
First I pull the Docker image, then try to create 2 containers out of it. It fails with an error message that the ports are already in use, even though I am incrementing the port value for each container.
import docker, sys


class CreateContainer:

    def __init__(self):
        self.client = CreateContainer.create_client()

    @staticmethod
    def create_client():
        client = docker.from_env()
        return client

    def pull_image(self, image_name):
        image = self.client.images.pull(image_name)
        print(image.tags)

    def create_containers(self, image, container_name, expose_port, container_count=1):
        container = self.client.containers.run(
            image,
            name=container_name,
            hostname=container_name,
            ports=expose_port,
            detach=True
        )
        for line in container.logs():
            print(line)
        return container


if __name__ == '__main__':
    threads = int(sys.argv[1])
    c_obj = CreateContainer()
    for i in range(1, threads + 1):
        c_obj.create_containers("selenium/standalone-chrome", "Chrome_{0}".format(i), expose_port={5550 + i: 4444})
------Run----------
python test.py 2
------error-----
Traceback (most recent call last):
File "C:\Program Files\Python39\lib\site-packages\docker\api\client.py", line 268, in _raise_for_status
response.raise_for_status()
File "C:\Program Files\Python39\lib\site-packages\requests\models.py", line 943, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: http+docker://localnpipe/v1.40/containers/260894dbaec6946e5f31fdbfb5307182d2f621c12a38f328f6efac58df58854d/start
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\Desktop\FreeLance\Utility\src\docker_engine\create_container.py", line 53, in <module>
c_obj.create_containers("selenium/standalone-chrome", "Chrome_{0}".format(i), expose_port={5550+i:4444})
File "C:\Users\Desktop\FreeLance\Utility\src\docker_engine\create_container.py", line 20, in create_containers
container = self.client.containers.run(
File "C:\Program Files\Python39\lib\site-packages\docker\models\containers.py", line 818, in run
container.start()
File "C:\Program Files\Python39\lib\site-packages\docker\models\containers.py", line 404, in start
return self.client.api.start(self.id, **kwargs)
File "C:\Program Files\Python39\lib\site-packages\docker\utils\decorators.py", line 19, in wrapped
return f(self, resource_id, *args, **kwargs)
File "C:\Program Files\Python39\lib\site-packages\docker\api\container.py", line 1111, in start
self._raise_for_status(res)
File "C:\Program Files\Python39\lib\site-packages\docker\api\client.py", line 270, in _raise_for_status
raise create_api_error_from_http_exception(e)
File "C:\Program Files\Python39\lib\site-packages\docker\errors.py", line 31, in create_api_error_from_http_exception
raise cls(e, response=response, explanation=explanation)
docker.errors.APIError: 500 Server Error for http+docker://localnpipe/v1.40/containers/260894dbaec6946e5f31fdbfb5307182d2f621c12a38f328f6efac58df58854d/start: Internal Server Error ("driver failed programming external connectivity on endpoint Chrome_2 (c0c77528743a1e3153201565b2cb520243b66adbb903bb69e91a00c4399aca62): Bind for 0.0.0.0:4444 failed: port is already allocated")
While providing ports, the syntax is {container_port: host_port}. Your loop maps a different container port to the same host port 4444 each time, so the second container fails to bind; map container port 4444 to a different host port instead:
c_obj.create_containers("selenium/standalone-chrome", "Chrome_{0}".format(i), expose_port={4444:5550+i})
Documentation: https://docker-py.readthedocs.io/en/stable/containers.html
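For completeness, here is a minimal sketch of the corrected main block (same names as in the question; it assumes host ports 5551 and up are free):

if __name__ == '__main__':
    threads = int(sys.argv[1])
    c_obj = CreateContainer()
    for i in range(1, threads + 1):
        # {container_port: host_port}: container port 4444 -> a unique host port per container
        c_obj.create_containers(
            "selenium/standalone-chrome",
            "Chrome_{0}".format(i),
            expose_port={4444: 5550 + i}
        )

Each Selenium node then keeps listening on 4444 inside its container while being reachable on the host at 5551, 5552, and so on.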
I am trying to use Azure Service Bus as the broker for my Celery app.
I have patched together a solution by referring to various sources.
The goal is to use Azure Service Bus as the broker and PostgreSQL as the backend.
I created an Azure Service Bus namespace and copied the credentials for the RootManageSharedAccessKey policy into the Celery app.
Following is the tasks.py:
from time import sleep
from celery import Celery
from kombu.utils.url import safequote

SAS_policy = safequote("RootManageSharedAccessKey")  # SAS policy
SAS_key = safequote("1234222zUY28tRUtp+A2YoHmDYcABCD")  # Primary key from the previous screenshot
namespace = safequote("bluenode-dev")

app = Celery('tasks', backend='db+postgresql://afsan.gujarati:admin@localhost/local_dev',
             broker=f'azureservicebus://{SAS_policy}:{SAS_key}=@{namespace}')

@app.task
def divide(x, y):
    sleep(30)
    return x/y
When I try to run the Celery app using the following command:
celery -A tasks worker --loglevel=INFO
I get the following error
[2020-10-09 14:00:32,035: CRITICAL/MainProcess] Unrecoverable error: AzureHttpError('Unauthorized\n<Error><Code>401</Code><Detail>claim is empty or token is invalid. TrackingId:295f7c76-770e-40cc-8489-e0eb56248b09_G5S1, SystemTracker:bluenode-dev.servicebus.windows.net:$Resources/Queues, Timestamp:2020-10-09T20:00:31</Detail></Error>')
Traceback (most recent call last):
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/kombu/transport/virtual/base.py", line 918, in create_channel
return self._avail_channels.pop()
IndexError: pop from empty list
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/azure/servicebus/control_client/servicebusservice.py", line 1225, in _perform_request
resp = self._filter(request)
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/azure/servicebus/control_client/_http/httpclient.py", line 211, in perform_request
raise HTTPError(status, message, respheaders, respbody)
azure.servicebus.control_client._http.HTTPError: Unauthorized
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/celery/worker/worker.py", line 203, in start
self.blueprint.start(self)
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/celery/bootsteps.py", line 116, in start
step.start(parent)
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/celery/bootsteps.py", line 365, in start
return self.obj.start()
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/celery/worker/consumer/consumer.py", line 311, in start
blueprint.start(self)
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/celery/bootsteps.py", line 116, in start
step.start(parent)
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/celery/worker/consumer/connection.py", line 21, in start
c.connection = c.connect()
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/celery/worker/consumer/consumer.py", line 398, in connect
conn = self.connection_for_read(heartbeat=self.amqheartbeat)
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/celery/worker/consumer/consumer.py", line 404, in connection_for_read
return self.ensure_connected(
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/celery/worker/consumer/consumer.py", line 430, in ensure_connected
conn = conn.ensure_connection(
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/kombu/connection.py", line 383, in ensure_connection
self._ensure_connection(*args, **kwargs)
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/kombu/connection.py", line 435, in _ensure_connection
return retry_over_time(
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/kombu/utils/functional.py", line 325, in retry_over_time
return fun(*args, **kwargs)
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/kombu/connection.py", line 866, in _connection_factory
self._connection = self._establish_connection()
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/kombu/connection.py", line 801, in _establish_connection
conn = self.transport.establish_connection()
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/kombu/transport/virtual/base.py", line 938, in establish_connection
self._avail_channels.append(self.create_channel(self))
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/kombu/transport/virtual/base.py", line 920, in create_channel
channel = self.Channel(connection)
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/kombu/transport/azureservicebus.py", line 64, in __init__
for queue in self.queue_service.list_queues():
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/azure/servicebus/control_client/servicebusservice.py", line 313, in list_queues
response = self._perform_request(request)
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/azure/servicebus/control_client/servicebusservice.py", line 1227, in _perform_request
return _service_bus_error_handler(ex)
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/azure/servicebus/control_client/_serialization.py", line 569, in _service_bus_error_handler
return _general_error_handler(http_error)
File "/Users/afsan.gujarati/.pyenv/versions/3.8.1/envs/celery-servicebus/lib/python3.8/site-packages/azure/servicebus/control_client/_common_error.py", line 41, in _general_error_handler
raise AzureHttpError(message, http_error.status)
azure.common.AzureHttpError: Unauthorized
<Error><Code>401</Code><Detail>claim is empty or token is invalid. TrackingId:295f7c76-770e-40cc-8489-e0eb56248b09_G5S1, SystemTracker:bluenode-dev.servicebus.windows.net:$Resources/Queues, Timestamp:2020-10-09T20:00:31</Detail></Error>
I don't see a straight solution for this anywhere. What am I missing?
P.S. I did not create the queue in Azure Service Bus. I am assuming that Celery would create the queue by itself when the Celery app is executed.
P.P.S. I also tried to use the exact same credentials in Python's Service Bus client and it seemed to work. It feels like a Celery issue, but I am not able to figure out exactly what it is.
If you want to use the Azure Service Bus transport to connect to Azure Service Bus, the URL should be azureservicebus://{SAS policy name}:{SAS key}@{Service Bus Namespace}.
For example:
Get the SAS policy name and primary key from Shared access policies → RootManageSharedAccessKey in the Azure portal (screenshot omitted here).
Code:
from celery import Celery
from kombu.utils.url import safequote

SAS_policy = "RootManageSharedAccessKey"  # SAS policy
# Primary key from the previous screenshot
SAS_key = safequote("X/*****qyY=")
namespace = "bowman1012"

app = Celery('tasks', backend='db+postgresql://<>@localhost/<>',
             broker=f'azureservicebus://{SAS_policy}:{SAS_key}@{namespace}')

@app.task
def add(x, y):
    return x + y
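To check the wiring end to end, a minimal usage sketch (assuming the worker above is running and the placeholder backend URL has been filled in):

# In one shell:
#   celery -A tasks worker --loglevel=INFO
# Then, from a Python shell in the same directory:
from tasks import add

result = add.delay(4, 4)       # publishes the task to the Service Bus queue
print(result.get(timeout=30))  # prints 8 once the worker stores the result in the backend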
I'm trying to use Celery with SQS as the broker. In order to use SQS from my container I need to assume a role, and for that I'm using STS. My code looks like this:
import os

import boto3
from celery import Celery

role_info = {
    'RoleArn': 'arn:aws:iam::xxxxxxx:role/my-role-execution',
    'RoleSessionName': 'roleExecution'
}

sts_client = boto3.client('sts', region_name='eu-central-1')
credentials = sts_client.assume_role(**role_info)

aws_access_key_id = credentials["Credentials"]['AccessKeyId']
aws_secret_access_key = credentials["Credentials"]['SecretAccessKey']
aws_session_token = credentials["Credentials"]["SessionToken"]

os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
os.environ["AWS_DEFAULT_REGION"] = 'eu-central-1'
os.environ["AWS_SESSION_TOKEN"] = aws_session_token

broker = "sqs://"
backend = 'redis://redis-service:6379/0'

celery = Celery('tasks', broker=broker, backend=backend)
celery.conf["task_default_queue"] = 'my-queue'
celery.conf["broker_transport_options"] = {
    'region': 'eu-central-1',
    'predefined_queues': {
        'my-queue': {
            'url': 'https://sqs.eu-central-1.amazonaws.com/xxxxxxx/my-queue'
        }
    }
}
In the same file I have the following task:
@celery.task(name='my-queue.my_task')
def my_task(content) -> int:
    print("hello")
    return 0
When I execute this code I get the following error:
[2020-09-24 10:38:03,602: CRITICAL/MainProcess] Unrecoverable error: ClientError('An error occurred (AccessDenied) when calling the ListQueues operation: Access to the resource https://eu-central-1.queue.amazonaws.com/ is denied.',)
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/kombu/transport/virtual/base.py", line 921, in create_channel
return self._avail_channels.pop()
IndexError: pop from empty list
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/celery/worker/worker.py", line 208, in start
self.blueprint.start(self)
File "/usr/local/lib/python3.6/site-packages/celery/bootsteps.py", line 119, in start
step.start(parent)
File "/usr/local/lib/python3.6/site-packages/celery/bootsteps.py", line 369, in start
return self.obj.start()
File "/usr/local/lib/python3.6/site-packages/celery/worker/consumer/consumer.py", line 318, in start
blueprint.start(self)
File "/usr/local/lib/python3.6/site-packages/celery/bootsteps.py", line 119, in start
step.start(parent)
File "/usr/local/lib/python3.6/site-packages/celery/worker/consumer/connection.py", line 23, in start
c.connection = c.connect()
File "/usr/local/lib/python3.6/site-packages/celery/worker/consumer/consumer.py", line 405, in connect
conn = self.connection_for_read(heartbeat=self.amqheartbeat)
File "/usr/local/lib/python3.6/site-packages/celery/worker/consumer/consumer.py", line 412, in connection_for_read
self.app.connection_for_read(heartbeat=heartbeat))
File "/usr/local/lib/python3.6/site-packages/celery/worker/consumer/consumer.py", line 439, in ensure_connected
callback=maybe_shutdown,
File "/usr/local/lib/python3.6/site-packages/kombu/connection.py", line 422, in ensure_connection
callback, timeout=timeout)
File "/usr/local/lib/python3.6/site-packages/kombu/utils/functional.py", line 341, in retry_over_time
return fun(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/kombu/connection.py", line 275, in connect
return self.connection
File "/usr/local/lib/python3.6/site-packages/kombu/connection.py", line 823, in connection
self._connection = self._establish_connection()
File "/usr/local/lib/python3.6/site-packages/kombu/connection.py", line 778, in _establish_connection
conn = self.transport.establish_connection()
File "/usr/local/lib/python3.6/site-packages/kombu/transport/virtual/base.py", line 941, in establish_connection
self._avail_channels.append(self.create_channel(self))
File "/usr/local/lib/python3.6/site-packages/kombu/transport/virtual/base.py", line 923, in create_channel
channel = self.Channel(connection)
File "/usr/local/lib/python3.6/site-packages/kombu/transport/SQS.py", line 100, in __init__
self._update_queue_cache(self.queue_name_prefix)
File "/usr/local/lib/python3.6/site-packages/kombu/transport/SQS.py", line 105, in _update_queue_cache
resp = self.sqs.list_queues(QueueNamePrefix=queue_name_prefix)
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 337, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 656, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (AccessDenied) when calling the ListQueues operation: Access to the resource https://eu-central-1.queue.amazonaws.com/ is denied.
If I use boto3 directly without Celery, I'm able to connect to the queue and retrieve data without this error. I don't know why Celery/Kombu tries to list queues when I specify the predefined_queues configuration, which is supposed to prevent exactly this behavior. From the Celery documentation:
If you want Celery to use a set of predefined queues in AWS, and to never attempt to list SQS queues, nor attempt to create or delete them, pass a map of queue names to URLs using the predefined_queue_urls setting
Does anyone know what is happening? How should I modify my code to make it work? It seems that Celery is not using the credentials at all.
The versions I'm using:
celery==4.4.7
boto3==1.14.54
kombu==4.5.0
Thanks!
P.S. I created an issue on GitHub to track whether this might be a library error or not...
I solved the problem by updating the dependencies to the latest versions:
celery==5.0.0
boto3==1.14.54
kombu==5.0.2
pycurl==7.43.0.6
I was able to get celery==4.4.7 and kombu==4.6.11 working by setting the following configuration option:
celery.conf["task_create_missing_queues"] = False
I receive the following output:
Traceback (most recent call last):
File "/home/ec2-user/env/lib64/python3.7/site-packages/redis/connection.py", line 1192, in get_connection
raise ConnectionError('Connection has data')
redis.exceptions.ConnectionError: Connection has data
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/ec2-user/env/lib64/python3.7/site-packages/eventlet/hubs/hub.py", line 457, in fire_timers
timer()
File "/home/ec2-user/env/lib64/python3.7/site-packages/eventlet/hubs/timer.py", line 58, in __call__
cb(*args, **kw)
File "/home/ec2-user/env/lib64/python3.7/site-packages/eventlet/greenthread.py", line 214, in main
result = function(*args, **kwargs)
File "crawler.py", line 53, in fetch_listing
url = dequeue_url()
File "/home/ec2-user/WebCrawler/helpers.py", line 109, in dequeue_url
return redis.spop("listing_url_queue")
File "/home/ec2-user/env/lib64/python3.7/site-packages/redis/client.py", line 2255, in spop
return self.execute_command('SPOP', name, *args)
File "/home/ec2-user/env/lib64/python3.7/site-packages/redis/client.py", line 875, in execute_command
conn = self.connection or pool.get_connection(command_name, **options)
File "/home/ec2-user/env/lib64/python3.7/site-packages/redis/connection.py", line 1197, in get_connection
raise ConnectionError('Connection not ready')
redis.exceptions.ConnectionError: Connection not ready
I couldn't find any issue related to this particular error. I emptied/flushed all Redis databases, so there should be no data there. I assume it has something to do with eventlet and patching. But even when I put the following code right at the beginning of the file, the error appears.
import eventlet
eventlet.monkey_patch()
What does this error mean?
Finally, I came up with the answer to my problem.
When connecting to Redis with Python, I had specified database number 0.
redis = redis.Redis(host="example.com", port=6379, db=0)
After changing the database to number 1, it worked.
redis = redis.Redis(host="example.com", port=6379, db=1)
Another way is to set protected-mode to no in /etc/redis/redis.conf. This is only recommended when running Redis locally.
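One related pitfall worth noting here (a general property of eventlet, not something confirmed by the question): eventlet.monkey_patch() only affects modules imported after it runs, so it must come before the redis import:

# Patch the standard library before anything else imports socket users.
import eventlet
eventlet.monkey_patch()

import redis  # imported after patching, so it uses green sockets

r = redis.Redis(host="example.com", port=6379, db=1)  # hypothetical host; db=1 per the answer
print(r.spop("listing_url_queue"))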
I am running Neo4j 2.2.1 on an Ubuntu Amazon EC2 instance. When I try to connect through Python using py2neo 2.0.7, I get the following error:
py2neo.packages.httpstream.http.SocketError: Operation not permitted
I am able to access the web interface through http://52.10.**.***:7474/browser/
Code:
from py2neo import Graph, watch, Node, Relationship

url_graph_conn = "https://neo4j:password@52.10.**.***:7474/db/data/"
print url_graph_conn

my_conn = Graph(url_graph_conn)
babynames = my_conn.find("BabyName")
for babyname in babynames:
    print 2
Error message:
https://neo4j:password@52.10.**.***:7474/db/data/
Traceback (most recent call last):
File "C:\Users\rharoon002\eclipse_workspace\peace\peace\core\graphconnection.py", line 39, in <module>
for babyname in babynames:
File "C:\Python27\lib\site-packages\py2neo\core.py", line 770, in find
response = self.cypher.post(statement, parameters)
File "C:\Python27\lib\site-packages\py2neo\core.py", line 667, in cypher
metadata = self.resource.metadata
File "C:\Python27\lib\site-packages\py2neo\core.py", line 213, in metadata
self.get()
File "C:\Python27\lib\site-packages\py2neo\core.py", line 258, in get
response = self.__base.get(headers=headers, redirect_limit=redirect_limit, **kwargs)
File "C:\Python27\lib\site-packages\py2neo\packages\httpstream\http.py", line 966, in get
return self.__get_or_head("GET", if_modified_since, headers, redirect_limit, **kwargs)
File "C:\Python27\lib\site-packages\py2neo\packages\httpstream\http.py", line 943, in __get_or_head
return rq.submit(redirect_limit=redirect_limit, **kwargs)
File "C:\Python27\lib\site-packages\py2neo\packages\httpstream\http.py", line 433, in submit
http, rs = submit(self.method, uri, self.body, self.headers)
File "C:\Python27\lib\site-packages\py2neo\packages\httpstream\http.py", line 362, in submit
raise SocketError(code, description, host_port=uri.host_port)
py2neo.packages.httpstream.http.SocketError: Operation not permitted
You are trying to access neo4j via https on the standard port for http (7474):
url_graph_conn = "https://neo4j:password@52.10.**.***:7474/db/data/"
The standard port for an https connection is 7473. Try:
url_graph_conn = "https://neo4j:password@52.10.**.***:7473/db/data/"
And make sure you can access the web interface via https:
https://52.10.**.***:7473/browser/
You can change/see the port settings in your neo4j-server.properties file.
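For reference, a sketch of the relevant settings in conf/neo4j-server.properties for Neo4j 2.x (property names recalled for that release, so verify them against your own file):

# conf/neo4j-server.properties (Neo4j 2.x -- check your installation)
org.neo4j.server.webserver.port=7474
org.neo4j.server.webserver.https.enabled=true
org.neo4j.server.webserver.https.port=7473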
I am trying to write a Google Cloud Platform app in Python with Flask that makes an MQTT connection. I have included the paho Python library by doing pip install paho-mqtt -t libs/. However, when I try to run the app, even if I don't try to connect to MQTT, I get a weird error about IP address checking:
RuntimeError: error('illegal IP address string passed to inet_pton',)
It seems something in the remote_socket lib is causing a problem. Is this a security issue? Is there some way to disable it?
Relevant code:
from flask import Flask
import paho.mqtt.client as mqtt
import logging as logger

app = Flask(__name__)
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.

# Callback to print out connection status
def on_connect(mosq, obj, rc):
    logger.info('on_connect')
    if rc == 0:
        logger.info("Connected")
        mqttc.subscribe('test', 0)
    else:
        logger.info(rc)

def on_message(mqttc, obj, msg):
    logger.info(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))

mqttc = mqtt.Client("mqttpy")
mqttc.on_message = on_message
mqttc.on_connect = on_connect
As well as the full stack trace:
ERROR 2014-06-03 15:14:57,285 wsgi.py:262]
Traceback (most recent call last):
File "/Users/cbarnes/google-cloud-sdk/platform/google_appengine/google/appengine/runtime/wsgi.py", line 239, in Handle
handler = _config_handle.add_wsgi_middleware(self._LoadHandler())
File "/Users/cbarnes/google-cloud-sdk/platform/google_appengine/google/appengine/runtime/wsgi.py", line 298, in _LoadHandler
handler, path, err = LoadObject(self._handler)
File "/Users/cbarnes/google-cloud-sdk/platform/google_appengine/google/appengine/runtime/wsgi.py", line 84, in LoadObject
obj = __import__(path[0])
File "/Users/cbarnes/code/ignite/tank-demo/appengine-flask-demo/main.py", line 24, in <module>
mqttc = mqtt.Client("mqtthtpp")
File "/Users/cbarnes/code/ignite/tank-demo/appengine-flask-demo/lib/paho/mqtt/client.py", line 403, in __init__
self._sockpairR, self._sockpairW = _socketpair_compat()
File "/Users/cbarnes/code/ignite/tank-demo/appengine-flask-demo/lib/paho/mqtt/client.py", line 255, in _socketpair_compat
listensock.bind(("localhost", 0))
File "/Users/cbarnes/google-cloud-sdk/platform/google_appengine/google/appengine/dist27/socket.py", line 222, in meth
return getattr(self._sock,name)(*args)
File "/Users/cbarnes/google-cloud-sdk/platform/google_appengine/google/appengine/api/remote_socket/_remote_socket.py", line 668, in bind
self._SetProtoFromAddr(request.mutable_proxy_external_ip(), address)
File "/Users/cbarnes/google-cloud-sdk/platform/google_appengine/google/appengine/api/remote_socket/_remote_socket.py", line 632, in _SetProtoFromAddr
proto.set_packed_address(self._GetPackedAddr(address))
File "/Users/cbarnes/google-cloud-sdk/platform/google_appengine/google/appengine/api/remote_socket/_remote_socket.py", line 627, in _GetPackedAddr
AI_NUMERICSERV|AI_PASSIVE):
File "/Users/cbarnes/google-cloud-sdk/platform/google_appengine/google/appengine/api/remote_socket/_remote_socket.py", line 338, in getaddrinfo
canonical=(flags & AI_CANONNAME))
File "/Users/cbarnes/google-cloud-sdk/platform/google_appengine/google/appengine/api/remote_socket/_remote_socket.py", line 211, in _Resolve
canon, aliases, addresses = _ResolveName(name, families)
File "/Users/cbarnes/google-cloud-sdk/platform/google_appengine/google/appengine/api/remote_socket/_remote_socket.py", line 229, in _ResolveName
apiproxy_stub_map.MakeSyncCall('remote_socket', 'Resolve', request, reply)
File "/Users/cbarnes/google-cloud-sdk/platform/google_appengine/google/appengine/api/apiproxy_stub_map.py", line 94, in MakeSyncCall
return stubmap.MakeSyncCall(service, call, request, response)
File "/Users/cbarnes/google-cloud-sdk/platform/google_appengine/google/appengine/api/apiproxy_stub_map.py", line 328, in MakeSyncCall
rpc.CheckSuccess()
File "/Users/cbarnes/google-cloud-sdk/platform/google_appengine/google/appengine/api/apiproxy_rpc.py", line 156, in _WaitImpl
self.request, self.response)
File "/Users/cbarnes/google-cloud-sdk/platform/google_appengine/google/appengine/ext/remote_api/remote_api_stub.py", line 200, in MakeSyncCall
self._MakeRealSyncCall(service, call, request, response)
File "/Users/cbarnes/google-cloud-sdk/platform/google_appengine/google/appengine/ext/remote_api/remote_api_stub.py", line 234, in _MakeRealSyncCall
raise pickle.loads(response_pb.exception())
RuntimeError: error('illegal IP address string passed to inet_pton',)
INFO 2014-06-03 15:14:57,291 module.py:639] default: "GET / HTTP/1.1" 500 -
Thanks!
You're trying to connect to host test and port 0, where typically you should use localhost and 1883.
See the getting started example here:
https://pypi.python.org/pypi/paho-mqtt/0.9#usage-and-api
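A minimal sketch of what that looks like with the question's code (the broker host and port are the conventional defaults this answer suggests, not values taken from the question):

import paho.mqtt.client as mqtt

mqttc = mqtt.Client("mqttpy")
mqttc.on_connect = on_connect  # callbacks as defined in the question
mqttc.on_message = on_message
mqttc.connect("localhost", 1883, 60)  # host, port, keepalive
mqttc.loop_forever()  # blocks, handling network traffic and dispatching callbacks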