Google Datastore (ndb) with gevent - Python

I am trying to use a gevent-based server with the remote Datastore API, and I occasionally get this error:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/gevent/pywsgi.py", line 884, in handle_one_response
self.run_application()
File "/home/abhinavabcd_gmail_com/samosa-scripts/samosa_messaging_framework/geventwebsocket/handler.py", line 76, in run_ap
plication
self.run_websocket()
File "/home/abhinavabcd_gmail_com/samosa-scripts/samosa_messaging_framework/geventwebsocket/handler.py", line 52, in run_we
bsocket
self.application(self.environ, lambda s, h, e=None: [])
File "server.py", line 478, in __call__
current_app.handle()
File "/home/abhinavabcd_gmail_com/samosa-scripts/samosa_messaging_framework/geventwebsocket/resource.py", line 23, in handl
e
self.protocol.on_close()
File "/home/abhinavabcd_gmail_com/samosa-scripts/samosa_messaging_framework/geventwebsocket/protocols/base.py", line 14, in
on_close
self.app.on_close(reason)
File "server.py", line 520, in on_close
current_node.destroy_connection(self.ws)
File "server.py", line 428, in destroy_connection
db.remove_connection(conn.connection_id)
File "/home/abhinavabcd_gmail_com/samosa-scripts/samosa_messaging_framework/database_ndb.py", line 141, in remove_connectio
n
key.delete()
File "/home/abhinavabcd_gmail_com/google_appengine/google/appengine/ext/ndb/model.py", line 3451, in _put
return self._put_async(**ctx_options).get_result()
File "/home/abhinavabcd_gmail_com/google_appengine/google/appengine/ext/ndb/tasklets.py", line 383, in get_result
self.check_success()
File "/home/abhinavabcd_gmail_com/google_appengine/google/appengine/ext/ndb/tasklets.py", line 378, in check_success
self.wait()
File "/home/abhinavabcd_gmail_com/google_appengine/google/appengine/ext/ndb/tasklets.py", line 362, in wait
if not ev.run1():
File "/home/abhinavabcd_gmail_com/google_appengine/google/appengine/ext/ndb/eventloop.py", line 253, in run1
delay = self.run0()
File "/home/abhinavabcd_gmail_com/google_appengine/google/appengine/ext/ndb/eventloop.py", line 238, in run0
(rpc, self.rpcs))
RuntimeError: rpc <google.appengine.api.apiproxy_stub_map.UserRPC object at 0x7fb46e754a90> was not given to wait_any as a ch
oice {<google.appengine.api.apiproxy_stub_map.UserRPC object at 0x7fb46e779f90>: (<bound method Future._on_rpc_completion of
<Future 7fb46e756f10 created by run_queue(context.py:185) for tasklet _memcache_set_tasklet(context.py:1111); pending>>, (<go
ogle.appengine.api.apiproxy_stub_map.UserRPC object at 0x7fb46e779f90>, '', <google.appengine.datastore.datastore_rpc.Connect
ion object at 0x7fb46e874250>, <generator object _memcache_set_tasklet at 0x7fb46e79b9b0>), {}), <google.appengine.api.apipro
xy_stub_map.UserRPC object at 0x7fb46e75c690>: (<bound method Future._on_rpc_completion of <Future 7fb46e7c8950 created by ru
n_queue(context.py:185) for tasklet _memcache_set_tasklet(context.py:1111); pending>>, (<google.appengine.api.apiproxy_stub_m
ap.UserRPC object at 0x7fb46e75c690>, '', <google.appengine.datastore.datastore_rpc.Connection object at 0x7fb46e7c8ad0>, <ge
nerator object _memcache_set_tasklet at 0x7fb46e6f3640>), {})}
# monkey patched already
try:
    import dev_appserver
    dev_appserver.fix_sys_path()
except ImportError:
    print('Please make sure the App Engine SDK is in your PYTHONPATH.')
    raise

email = '----SERVICE_ACCOUNT_EMAIL-------'

from google.appengine.ext.remote_api import remote_api_stub
remote_api_stub.ConfigureRemoteApiForOAuth(
    '{}.appspot.com'.format("XXXPROJECT_IDXXX"),
    '/_ah/remote_api',
    service_account=email,
    key_file_path='KEY---1927925e9abc.p12')

import config
from google.appengine.ext import ndb
from lru_cache import LRUCache
It happens occasionally, with datastore write operations (put and delete).
Any pointers to understand or fix this would be appreciated.
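The error itself comes from ndb's event loop finding an RPC it was never asked to wait on, which suggests that once gevent is monkey-patched, more than one greenlet ends up driving ndb RPCs (including the memcache tasklets ndb issues internally) at the same time. A hedged mitigation sketch, not a confirmed fix: serialize the datastore writes behind a gevent semaphore so only one greenlet touches ndb at a time. The decorator name, the 'Connection' kind, and this remove_connection body are illustrations, not the real code from database_ndb.py.

# Hedged sketch: serialize ndb writes so concurrent greenlets cannot drive
# ndb's event loop at the same time. Names below are made up for illustration.
import functools
import gevent.lock
from google.appengine.ext import ndb

_ndb_write_lock = gevent.lock.BoundedSemaphore(1)

def serialized_ndb(fn):
    """Hold the semaphore for the duration of the wrapped datastore call."""
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        with _ndb_write_lock:
            return fn(*args, **kwargs)
    return wrapper

@serialized_ndb
def remove_connection(connection_id):
    # 'Connection' is a stand-in for whatever kind database_ndb.py uses;
    # only one greenlet at a time reaches the delete RPC.
    ndb.Key('Connection', connection_id).delete()

If serializing every write turns out to be too coarse, the same lock could be taken only around the put and delete calls that trigger the error.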

Related

Runtime Error when running a simple cuML code in a Dask environment

I'm trying to test some simple code using two remote workers. I don't know what is going on or what the error refers to.
The code is simple:
#!/usr/bin/python3
from cuml.dask.cluster import KMeans
from cuml.dask.datasets import make_blobs
from dask.distributed import Client
c = Client("dask-scheduler:8786")
centers = 5
X, _ = make_blobs(n_samples=10000, centers=centers)
k_means = KMeans(n_clusters=centers)
k_means.fit(X)
labels = k_means.predict(X)
It connects, but when it tries to execute the clustering algorithm, it throws the following error:
Traceback (most recent call last):
File "test_cuml.py", line 15, in <module>
k_means.fit(X)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/cuml/common/memory_utils.py", line 93, in cupy_rmm_wrapper
return func(*args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/cuml/dask/cluster/kmeans.py", line 161, in fit
comms.init(workers=data.workers)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/cuml/raft/dask/common/comms.py", line 209, in init
wait=True,
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 2506, in run
return self.sync(self._run, function, *args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 869, in sync
self.loop, func, *args, callback_timeout=callback_timeout, **kwargs
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/utils.py", line 332, in sync
raise exc.with_traceback(tb)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/utils.py", line 315, in f
result[0] = yield future
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/tornado/gen.py", line 762, in run
value = future.result()
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 2443, in _run
raise exc.with_traceback(tb)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/cuml/raft/dask/common/comms.py", line 429, in _func_init_all
_func_init_nccl(sessionId, uniqueId)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/cuml/raft/dask/common/comms.py", line 484, in _func_init_nccl
n.init(nWorkers, uniqueId, wid)
File "cuml/raft/dask/common/nccl.pyx", line 151, in cuml.raft.dask.common.nccl.nccl.init
The workers are reporting this issue:
distributed.worker - INFO - Run out-of-band function '_func_init_all'
distributed.worker - WARNING - Run Failed
Function: _func_init_all
args: (b'\x95d$\x89\x9beI\xf5\xa7\x8c7M\xe8V[v', b'\x02\x00\xc8\xdd\x8fj\x07\x90\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', False, {'tcp://dask-scheduler:40439': {'rank': 0}, 'tcp://dask-scheduler:39645': {'rank': 1}}, False, 0)
kwargs: {}
Traceback (most recent call last):
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/worker.py", line 4553, in run
result = await function(*args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/cuml/raft/dask/common/comms.py", line 429, in _func_init_all
_func_init_nccl(sessionId, uniqueId)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/cuml/raft/dask/common/comms.py", line 484, in _func_init_nccl
n.init(nWorkers, uniqueId, wid)
File "cuml/raft/dask/common/nccl.pyx", line 151, in cuml.raft.dask.common.nccl.nccl.init
RuntimeError: NCCL_ERROR: b'invalid usage'
Does anyone know what is happening or how to mitigate it? The error is not very clear to me. I have tried several versions of RAPIDS. IMPORTANT: I'm running in a Docker environment, sharing all GPUs (--gpus all) and the host network (--network host).
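One thing worth checking, since both workers in the log report the scheduler host: NCCL commonly reports 'invalid usage' when two ranks end up initialized on the same GPU. A hedged sanity check, assuming the dask_cuda package is installed, is to run the same fit against a LocalCUDACluster, which starts one worker per visible GPU:

#!/usr/bin/python3
# Hedged sanity check (assumes dask_cuda is installed): LocalCUDACluster pins
# one worker per visible GPU, so no two NCCL ranks share a device.
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from cuml.dask.cluster import KMeans
from cuml.dask.datasets import make_blobs

cluster = LocalCUDACluster()   # one worker per GPU on this host
c = Client(cluster)

centers = 5
X, _ = make_blobs(n_samples=10000, centers=centers)
k_means = KMeans(n_clusters=centers)
k_means.fit(X)
labels = k_means.predict(X)

If this works locally, the remote setup likely needs one dask-cuda worker per GPU, each pinned to its own device, rather than two plain dask workers on the scheduler host.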

AttributeError: 'lxml.etree.QName' object has no attribute 'resolve'

I am trying to use the Python Zeep library to work with a SOAP API, but I cannot figure out what my issue is when creating the client. Below is a sample of my code:
from requests import Session
from requests.auth import HTTPBasicAuth
from zeep import Client, Settings
from zeep.cache import SqliteCache
from zeep.transports import Transport
from conf.shared_vars import B2B_PROXY, WSDL_PROXY
session = Session()
session.auth = HTTPBasicAuth(B2B_PROXY['key'], B2B_PROXY['secret'])
wsdl = WSDL_PROXY + "SomeServices.wsdl"
client = Client(
    wsdl=wsdl,
    transport=Transport(
        session=session,
        cache=SqliteCache(path='./sqlite.db')))
When executing that script, it seems to load data (./sqlite.db is not empty), but I get the following error (traceback):
File "test_zeep.py", line 17, in <module>
cache=SqliteCache(path='./sqlite.db')))
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/client.py", line 68, in __init__
self.wsdl = Document(wsdl, self.transport, settings=self.settings)
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/wsdl/wsdl.py", line 82, in __init__
root_definitions = Definition(self, document, self.location)
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/wsdl/wsdl.py", line 184, in __init__
self.parse_types(doc)
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/wsdl/wsdl.py", line 316, in parse_types
self.types.add_documents(schema_nodes, self.location)
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/schema.py", line 117, in add_documents
document.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/schema.py", line 451, in resolve
schema.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/schema.py", line 451, in resolve
schema.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/schema.py", line 451, in resolve
schema.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/schema.py", line 475, in resolve
_resolve_dict(self._elements)
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/schema.py", line 456, in _resolve_dict
new = obj.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/element.py", line 301, in resolve
self.resolve_type()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/element.py", line 298, in resolve_type
self.type = self.type.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/types/unresolved.py", line 23, in resolve
return retval.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/types/complex.py", line 355, in resolve
self._resolved = self.extend(self._extension)
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/types/complex.py", line 401, in extend
self._element = self._element.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/indicators.py", line 213, in resolve
self[i] = elm.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/element.py", line 301, in resolve
self.resolve_type()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/element.py", line 298, in resolve_type
self.type = self.type.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/types/unresolved.py", line 23, in resolve
return retval.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/types/complex.py", line 361, in resolve
self._element = self._element.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/indicators.py", line 213, in resolve
self[i] = elm.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/element.py", line 301, in resolve
self.resolve_type()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/element.py", line 298, in resolve_type
self.type = self.type.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/types/collection.py", line 21, in resolve
self.item_type = self.item_type.resolve()
AttributeError: 'lxml.etree.QName' object has no attribute 'resolve'
Unfortunately, I do not know what to do with this information, what it means, or how to overcome the issue so I can use the client properly.
Thanks for any help you can offer on this topic.
Well, one way to get past this is to modify the method involved in zeep/xsd/types/collection.py:
def resolve(self):
    try:
        self.item_type = self.item_type.resolve()
    except Exception:
        print("No resolve method for {}".format(self.item_type))
    self.base_class = self.item_type.__class__
    return self
This is just a workaround and definitely not the best solution, but at least it allows me to use the Zeep client properly. I will file an issue on the Zeep GitHub.
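A less invasive thing to try first, assuming zeep 3.0 or later where Settings is available, is its lenient parsing mode; it does not always get past schema-resolution errors like this one, but it leaves the installed package untouched. A hedged sketch, reusing the wsdl and session objects from the question:

# Hedged alternative (assumes zeep >= 3.0): strict=False relaxes schema
# resolution, which sometimes copes with non-conforming WSDLs.
from zeep import Client, Settings
from zeep.cache import SqliteCache
from zeep.transports import Transport

settings = Settings(strict=False, xml_huge_tree=True)
client = Client(
    wsdl=wsdl,                 # same WSDL URL as above
    settings=settings,
    transport=Transport(
        session=session,       # same authenticated session as above
        cache=SqliteCache(path='./sqlite.db')))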

Timeout context manager error using sanic and telepot same time

I'm trying to create a web app that communicates with Telegram, using the Sanic web framework together with Telepot; both are asyncio based. Now I'm getting a very weird error.
This is my code:
import datetime
import telepot.aio
from sanic import Sanic

app = Sanic(__name__, load_env=False)
app.config.LOGO = ''

@app.listener('before_server_start')
async def server_init(app, loop):
    app.bot = telepot.aio.Bot('anything', loop=loop)
    # here we fall
    await app.bot.sendMessage(
        "#test",
        "Wao! {}".format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),)
    )

if __name__ == "__main__":
    app.run(
        debug=True
    )
The error that I'm getting is:
[2018-01-18 22:41:43 +0200] [10996] [ERROR] Experienced exception while trying to serve
Traceback (most recent call last):
File "/home/mk/Dev/project/venv/lib/python3.6/site-packages/sanic/app.py", line 646, in run
serve(**server_settings)
File "/home/mk/Dev/project/venv/lib/python3.6/site-packages/sanic/server.py", line 588, in serve
trigger_events(before_start, loop)
File "/home/mk/Dev/project/venv/lib/python3.6/site-packages/sanic/server.py", line 496, in trigger_events
loop.run_until_complete(result)
File "uvloop/loop.pyx", line 1364, in uvloop.loop.Loop.run_until_complete
File "/home/mk/Dev/project/sanic-telepot.py", line 14, in server_init
"Wao! {}".format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),)
File "/usr/lib/python3.6/asyncio/coroutines.py", line 109, in __next__
return self.gen.send(None)
File "/home/mk/Dev/project/venv/lib/python3.6/site-packages/telepot/aio/__init__.py", line 100, in sendMessage
return await self._api_request('sendMessage', _rectify(p))
File "/usr/lib/python3.6/asyncio/coroutines.py", line 109, in __next__
return self.gen.send(None)
File "/home/mk/Dev/project/venv/lib/python3.6/site-packages/telepot/aio/__init__.py", line 78, in _api_request
return await api.request((self._token, method, params, files), **kwargs)
File "/usr/lib/python3.6/asyncio/coroutines.py", line 109, in __next__
return self.gen.send(None)
File "/home/mk/Dev/project/venv/lib/python3.6/site-packages/telepot/aio/api.py", line 139, in request
async with fn(*args, **kwargs) as r:
File "/home/mk/Dev/project/venv/lib/python3.6/site-packages/aiohttp/client.py", line 690, in __aenter__
self._resp = yield from self._coro
File "/home/mk/Dev/project/venv/lib/python3.6/site-packages/aiohttp/client.py", line 221, in _request
with timer:
File "/home/mk/Dev/project/venv/lib/python3.6/site-packages/aiohttp/helpers.py", line 712, in __enter__
raise RuntimeError('Timeout context manager should be used '
RuntimeError: Timeout context manager should be used inside a task
Telepot uses aiohttp internally for its HTTP calls, and very similar code works if I build the same functionality with just aiohttp.web, so I'm not sure which project this problem belongs to.
Also, all the other dependencies (redis, database connections) that I wired up with the same approach are working perfectly.
Any suggestions on how to fix it?
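For what it's worth, aiohttp's timeout helper raises this RuntimeError when it is entered outside of a running task. A hedged sketch of a workaround, not a verified fix: schedule the Telegram call as a task on the loop instead of awaiting it directly in the listener. The after_server_start event and the greet() helper are choices made for this example; the token and chat reference are the same placeholders as in the question.

# Hedged sketch: run the sendMessage call inside a task on the running loop so
# aiohttp's timeout context manager has a current task.
import datetime
import telepot.aio
from sanic import Sanic

app = Sanic(__name__, load_env=False)
app.config.LOGO = ''

async def greet(bot):
    await bot.sendMessage(
        "#test",
        "Wao! {}".format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))

@app.listener('after_server_start')
async def server_init(app, loop):
    app.bot = telepot.aio.Bot('anything', loop=loop)
    loop.create_task(greet(app.bot))   # the call now runs inside a task

if __name__ == "__main__":
    app.run(debug=True)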

Running into xml invalid attribute name for NETCONF python library

I'm using the high-level Python library ncclient to edit the configuration of a NETCONF device, but I run into this error:
ValueError: Invalid attribute name u'xmlns:if'
I suspect it has something to do with an XML namespace problem, since the lxml library is complaining about an attribute name.
All I'm doing is creating a connection to the device and then closing it:
manager = ncclient.manager.connect(
    host=host,
    port=port,
    username=username,
    password=b64decode(password),
    device_params={
        "name": "nexus",
        "ssh_subsystem_name": "xmlagent"
    }
)
manager.close_session()
Here's a stack trace:
Traceback (most recent call last):
File "./switch_config.py", line 41, in <module>
main()
File "./switch_config.py", line 26, in main
manager.close_session()
File "/usr/lib/python2.6/site-packages/ncclient/manager.py", line 107, in wrapper
return self.execute(op_cls, *args, **kwds)
File "/usr/lib/python2.6/site-packages/ncclient/manager.py", line 174, in execute
raise_mode=self._raise_mode).request(*args, **kwds)
File "/usr/lib/python2.6/site-packages/ncclient/operations/session.py", line 28, in request
return self._request(new_ele("close-session"))
File "/usr/lib/python2.6/site-packages/ncclient/operations/rpc.py", line 290, in _request
req = self._wrap(op)
File "/usr/lib/python2.6/site-packages/ncclient/operations/rpc.py", line 275, in _wrap
**self._device_handler.get_xml_extra_prefix_kwargs())
File "/usr/lib/python2.6/site-packages/ncclient/xml_.py", line 153, in <lambda>
new_ele = lambda tag, attrs={}, **extra: etree.Element(qualify(tag), attrs, **extra)
File "lxml.etree.pyx", line 2812, in lxml.etree.Element (src/lxml/lxml.etree.c:61433)
File "apihelpers.pxi", line 123, in lxml.etree._makeElement (src/lxml/lxml.etree.c:13864)
File "apihelpers.pxi", line 111, in lxml.etree._makeElement (src/lxml/lxml.etree.c:13736)
File "apihelpers.pxi", line 263, in lxml.etree._initNodeAttributes (src/lxml/lxml.etree.c:15391)
File "apihelpers.pxi", line 1524, in lxml.etree._attributeValidOrRaise (src/lxml/lxml.etree.c:26886)
ValueError: Invalid attribute name u'xmlns:if'
I eventually got it to work on NX-OS by removing the extra namespaces in ncclient/devices/nexus.py and adding the root namespace "xmlns": "http://www.cisco.com/nxos:1.0:netconf":
def get_xml_base_namespace_dict(self):
    return {"xmlns": "http://www.cisco.com/nxos:1.0:netconf"}  # add root namespace

def get_xml_extra_prefix_kwargs(self):
    d = {
        # "xmlns:nxos": "http://www.cisco.com/nxos:1.0",  # remove other namespaces
        # "xmlns:if": "http://www.cisco.com/nxos:1.0:if_manager"
    }
    d.update(self.get_xml_base_namespace_dict())
    return d
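If you would rather not edit the installed package, the same change can be applied by monkey patching the device handler at runtime. A hedged sketch, assuming your ncclient version exposes ncclient.devices.nexus.NexusDeviceHandler with these two hook methods (the traceback above shows get_xml_extra_prefix_kwargs being called on the device handler):

# Hedged sketch: apply the namespace change without touching the installed
# ncclient sources. Assumes NexusDeviceHandler is importable as shown.
from ncclient.devices.nexus import NexusDeviceHandler

def _base_namespace(self):
    return {"xmlns": "http://www.cisco.com/nxos:1.0:netconf"}   # root namespace only

def _extra_prefix_kwargs(self):
    # drop the xmlns:nxos / xmlns:if prefixes that trigger the lxml ValueError
    d = {}
    d.update(self.get_xml_base_namespace_dict())
    return d

NexusDeviceHandler.get_xml_base_namespace_dict = _base_namespace
NexusDeviceHandler.get_xml_extra_prefix_kwargs = _extra_prefix_kwargs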

App Engine Python: AttributeError: 'module' object has no attribute 'Stock'

I am getting this error only in production; on localhost it works well.
Traceback (most recent call last):
File "/base/python_runtime/python_lib/versions/1/google/appengine/ext/webapp/_webapp25.py", line 701, in __call__
handler.get(*groups)
File "/base/data/home/apps/s~ordenaacoes/2.357768699674437719/controllers/mainh.py", line 74, in get
'stocks': goodStocks(),
File "/base/data/home/apps/s~ordenaacoes/2.357768699674437719/controllers/mainh.py", line 108, in goodStocks
goodStocks = memcache.get("goodStocks")
File "/base/python_runtime/python_lib/versions/1/google/appengine/api/memcache/__init__.py", line 574, in get
results = rpc.get_result()
File "/base/python_runtime/python_lib/versions/1/google/appengine/api/apiproxy_stub_map.py", line 592, in get_result
return self.__get_result_hook(self)
File "/base/python_runtime/python_lib/versions/1/google/appengine/api/memcache/__init__.py", line 639, in __get_hook
self._do_unpickle)
File "/base/python_runtime/python_lib/versions/1/google/appengine/api/memcache/__init__.py", line 271, in _decode_value
return do_unpickle(value)
File "/base/python_runtime/python_lib/versions/1/google/appengine/api/memcache/__init__.py", line 412, in _do_unpickle
return unpickler.load()
File "/base/python_runtime/python_dist/lib/python2.5/pickle.py", line 852, in load
dispatch[key](self)
File "/base/python_runtime/python_dist/lib/python2.5/pickle.py", line 1084, in load_global
klass = self.find_class(module, name)
File "/base/python_runtime/python_dist/lib/python2.5/pickle.py", line 1119, in find_class
klass = getattr(mod, name)
AttributeError: 'module' object has no attribute 'Stock'
Stock is one of my model classes. I tested with Python 2.5 on localhost too.
The line that raises the error is the memcache access (the get function).
I have changed the project, and maybe the type of the data I put in memcache is now different. Is there some way to clear the data in memcache?
Any ideas?
As of release 1.6.4 there is a Memcache Viewer in the Admin Console. It includes a "Flush Cache" button that should do exactly what you need.
Most likely you have a pickled version of an object in memcache that doesn't match your new code. Here's an old question on flushing memcache; the answer should apply to your case:
How can I have Google App Engine clear memcache every time a site is deployed?
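If you prefer to flush from code instead of the Admin Console (for example on an SDK older than 1.6.4), memcache.flush_all() does the same thing. A hedged sketch using the old webapp framework that appears in the traceback; the /flush_memcache route is made up for this example and should be removed or protected after use:

# Hedged sketch: a throwaway handler that drops every cached value once.
from google.appengine.api import memcache
from google.appengine.ext import webapp

class FlushMemcache(webapp.RequestHandler):
    def get(self):
        memcache.flush_all()                      # clears all cached pickles
        self.response.out.write('memcache flushed')

application = webapp.WSGIApplication([('/flush_memcache', FlushMemcache)])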
