I'm using a high level python library ncclient to edit the configuration of a NETCONF device but I run into this error:
ValueError: Invalid attribute name u'xmlns:if'
I suspect it has something to do with an xml namespace problem since the lxml library is complaining about an attribute name
All I'm doing is creating a connection to the device and then closing it
# Open a NETCONF session to the NX-OS device, then immediately close it.
manager = ncclient.manager.connect(
host=host,
port=port,
username=username,
# Password is stored base64-encoded and decoded just before use.
password=b64decode(password),
device_params={
# "nexus" selects ncclient's Cisco NX-OS device handler.
"name": "nexus",
# NX-OS serves its XML agent on a custom SSH subsystem name.
"ssh_subsystem_name": "xmlagent"
}
)
# close_session() builds an <rpc> element; per the traceback below, this is
# where lxml rejects the handler's prefixed "xmlns:if" attribute.
manager.close_session()
Here's a stack trace:
Traceback (most recent call last):
File "./switch_config.py", line 41, in <module>
main()
File "./switch_config.py", line 26, in main
manager.close_session()
File "/usr/lib/python2.6/site-packages/ncclient/manager.py", line 107, in wrapper
return self.execute(op_cls, *args, **kwds)
File "/usr/lib/python2.6/site-packages/ncclient/manager.py", line 174, in execute
raise_mode=self._raise_mode).request(*args, **kwds)
File "/usr/lib/python2.6/site-packages/ncclient/operations/session.py", line 28, in request
return self._request(new_ele("close-session"))
File "/usr/lib/python2.6/site-packages/ncclient/operations/rpc.py", line 290, in _request
req = self._wrap(op)
File "/usr/lib/python2.6/site-packages/ncclient/operations/rpc.py", line 275, in _wrap
**self._device_handler.get_xml_extra_prefix_kwargs())
File "/usr/lib/python2.6/site-packages/ncclient/xml_.py", line 153, in <lambda>
new_ele = lambda tag, attrs={}, **extra: etree.Element(qualify(tag), attrs, **extra)
File "lxml.etree.pyx", line 2812, in lxml.etree.Element (src/lxml/lxml.etree.c:61433)
File "apihelpers.pxi", line 123, in lxml.etree._makeElement (src/lxml/lxml.etree.c:13864)
File "apihelpers.pxi", line 111, in lxml.etree._makeElement (src/lxml/lxml.etree.c:13736)
File "apihelpers.pxi", line 263, in lxml.etree._initNodeAttributes (src/lxml/lxml.etree.c:15391)
File "apihelpers.pxi", line 1524, in lxml.etree._attributeValidOrRaise (src/lxml/lxml.etree.c:26886)
ValueError: Invalid attribute name u'xmlns:if'
I eventually got it to work on NX-OS by:
Remove all the namespaces in ncclient/devices/nexus.py and add namespace "xmlns":"http://www.cisco.com/nxos:1.0:netconf".
def get_xml_base_namespace_dict(self):
    """Return the root namespace mapping applied to the rpc element."""
    # Single default (unprefixed) xmlns declaration for the NX-OS agent.
    return dict(xmlns="http://www.cisco.com/nxos:1.0:netconf")
def get_xml_extra_prefix_kwargs(self):
    """Return the extra attribute kwargs passed when building the rpc element.

    The prefixed declarations ("xmlns:nxos", "xmlns:if") are deliberately
    omitted — lxml rejects prefixed names supplied as plain attributes —
    so only the base namespace mapping is returned.
    """
    return dict(self.get_xml_base_namespace_dict())
Related
I am trying to obtain the list of events from a minikube cluster using the Python Kubernetes API with the following code:
from kubernetes import config, client
# Load cluster/credential settings from the local kubeconfig.
config.load_kube_config()
# Client for the events.k8s.io/v1beta1 API group.
api = client.EventsV1beta1Api()
# Listing events across namespaces fails during response deserialization
# (see traceback) when an event carries a null eventTime.
print(api.list_event_for_all_namespaces())
I am getting the following error:
C:\Users\lameg\kubesense>python test.py
Traceback (most recent call last):
File "C:\Users\lameg\kubesense\test.py", line 6, in <module>
print(api.list_event_for_all_namespaces())
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api\events_v1beta1_api.py", line 651, in list_event_for_all_namespaces
return self.list_event_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api\events_v1beta1_api.py", line 758, in list_event_for_all_namespaces_with_http_info
return self.api_client.call_api(
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 348, in call_api
return self.__call_api(resource_path, method,
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 192, in __call_api
return_data = self.deserialize(response_data, response_type)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 264, in deserialize
return self.__deserialize(data, response_type)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 303, in __deserialize
return self.__deserialize_model(data, klass)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 639, in __deserialize_model
kwargs[attr] = self.__deserialize(value, attr_type)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 280, in __deserialize
return [self.__deserialize(sub_data, sub_kls)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 280, in <listcomp>
return [self.__deserialize(sub_data, sub_kls)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 303, in __deserialize
return self.__deserialize_model(data, klass)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 641, in __deserialize_model
instance = klass(**kwargs)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\models\v1beta1_event.py", line 112, in __init__
self.event_time = event_time
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\models\v1beta1_event.py", line 291, in event_time
raise ValueError("Invalid value for `event_time`, must not be `None`") # noqa: E501
ValueError: Invalid value for `event_time`, must not be `None`
Any ideas ?
That looks like either a bug in the Python client, or a bug in the OpenAPI specification used to generate the client: clearly, null is a value for eventTime that is supported by the API.
I think the only workaround is to monkey-patch the kubernetes.client module so that it accepts null values. Something like this:
from kubernetes import config, client
# Standard client setup from the local kubeconfig.
config.load_kube_config()
api = client.EventsV1beta1Api()
# This is descriptor, see https://docs.python.org/3/howto/descriptor.html
class FakeEventTime:
    """Data descriptor that proxies ``event_time`` to ``_event_time``.

    Unlike the generated kubernetes property it performs no validation,
    so a ``None`` eventTime no longer raises ``ValueError``.
    See https://docs.python.org/3/howto/descriptor.html
    """

    def __get__(self, obj, objtype=None):
        # Plain passthrough read — no "must not be None" check.
        return obj._event_time

    def __set__(self, obj, value):
        # Accept whatever the deserializer hands us, None included.
        obj._event_time = value
# Monkey-patch the `event_time` attribute of the V1beta1Event class,
# replacing the validating generated property with the permissive descriptor.
client.V1beta1Event.event_time = FakeEventTime()
# Now this works: deserialization no longer rejects null eventTime values.
events = api.list_event_for_all_namespaces()
The above code runs successfully against my OpenShift instance, whereas previously it would fail as you describe in your question.
I am trying to use python Zeep library in order to play with some SOAP API. But I can not figure out what is my issue when trying to create the client. Below is a sample of my code:
from requests import Session
from requests.auth import HTTPBasicAuth
from zeep import Client, Settings
from zeep.cache import SqliteCache
from zeep.transports import Transport
from conf.shared_vars import B2B_PROXY, WSDL_PROXY
# HTTP basic-auth credentials come from the project's shared config.
session = Session()
session.auth = HTTPBasicAuth(B2B_PROXY['key'], B2B_PROXY['secret'])
# Full URL of the WSDL document to load.
wsdl = WSDL_PROXY + "SomeServices.wsdl"
# Build the SOAP client; fetched documents are cached in a local SQLite
# file, which is why ./sqlite.db fills up even though Client() then fails.
client = Client(
wsdl=wsdl,
transport=Transport(
session=session,
cache=SqliteCache(path='./sqlite.db')))
When executing that script, it seems to load data (./sqlite is not empty), but I get the following error (traceback):
File "test_zeep.py", line 17, in <module>
cache=SqliteCache(path='./sqlite.db')))
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/client.py", line 68, in __init__
self.wsdl = Document(wsdl, self.transport, settings=self.settings)
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/wsdl/wsdl.py", line 82, in __init__
root_definitions = Definition(self, document, self.location)
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/wsdl/wsdl.py", line 184, in __init__
self.parse_types(doc)
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/wsdl/wsdl.py", line 316, in parse_types
self.types.add_documents(schema_nodes, self.location)
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/schema.py", line 117, in add_documents
document.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/schema.py", line 451, in resolve
schema.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/schema.py", line 451, in resolve
schema.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/schema.py", line 451, in resolve
schema.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/schema.py", line 475, in resolve
_resolve_dict(self._elements)
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/schema.py", line 456, in _resolve_dict
new = obj.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/element.py", line 301, in resolve
self.resolve_type()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/element.py", line 298, in resolve_type
self.type = self.type.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/types/unresolved.py", line 23, in resolve
return retval.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/types/complex.py", line 355, in resolve
self._resolved = self.extend(self._extension)
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/types/complex.py", line 401, in extend
self._element = self._element.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/indicators.py", line 213, in resolve
self[i] = elm.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/element.py", line 301, in resolve
self.resolve_type()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/element.py", line 298, in resolve_type
self.type = self.type.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/types/unresolved.py", line 23, in resolve
return retval.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/types/complex.py", line 361, in resolve
self._element = self._element.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/indicators.py", line 213, in resolve
self[i] = elm.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/element.py", line 301, in resolve
self.resolve_type()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/elements/element.py", line 298, in resolve_type
self.type = self.type.resolve()
File "/home/max/Documents/dev/django/nmtoolpy/venv/lib/python3.6/site-packages/zeep/xsd/types/collection.py", line 21, in resolve
self.item_type = self.item_type.resolve()
AttributeError: 'lxml.etree.QName' object has no attribute 'resolve'
Unfortunately, I do not know what to do with this information, what it involves, or how to overcome the issue so I can use the client properly!
Thanks for the help you could offer me on this topic.
Well one way to get through this is to modify the involved method in zeep/xsd/types/collection.py:
def resolve(self):
    """Resolve this collection's item type, tolerating non-resolvable leaves.

    Workaround for the zeep failure where ``item_type`` is a plain
    ``lxml.etree.QName`` (which has no ``resolve()`` method) instead of an
    unresolved zeep type.

    Returns:
        self, with ``item_type`` resolved when possible and ``base_class``
        set to the (possibly unresolved) item type's class.
    """
    try:
        self.item_type = self.item_type.resolve()
    except AttributeError:
        # Only the "object has no attribute 'resolve'" case is expected
        # here; a bare `except Exception` would silently swallow genuine
        # resolution errors as well, so keep the net narrow.
        print("No resolve method for {}".format(self.item_type))
    self.base_class = self.item_type.__class__
    return self
This is just a quick fix and definitely not the best solution, but at least it allows me to use the Zeep client properly! I will file an issue on Zeep's GitHub.
Running a simple script for backing up L2 startup configuration i always get this type of output referring TypeError:
No handlers could be found for logger "paramiko.transport"
Traceback (most recent call last):
File "hp_tftp.py", line 108, in
net_connect = ConnectHandler(**a_device)
File "/usr/local/lib/python2.7/dist-packages/netmiko-1.5.1-py2.7.egg/netmiko/ssh_dispatcher.py", line 167, in ConnectHandler
return ConnectionClass(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/netmiko-1.5.1-py2.7.egg/netmiko/base_connection.py", line 187, in init
self.establish_connection()
File "/usr/local/lib/python2.7/dist-packages/netmiko-1.5.1-py2.7.egg/netmiko/base_connection.py", line 650, in establish_connection
self.remote_conn_pre.connect(**ssh_connect_params)
File "/usr/local/lib/python2.7/dist-packages/paramiko/client.py", line 392, in connect
t.start_client(timeout=timeout)
File "/usr/local/lib/python2.7/dist-packages/paramiko/transport.py", line 545, in start_client
raise e
TypeError: 'type' object is not iterable
And this is the code:
for a_device in all_devices:
    # One SSH session per inventory entry.
    net_connect = ConnectHandler(**a_device)
    print("----------------------------------------BEGIN----------------------------------------")
    print("\n\n>>>>>>>>> Copying Config {0} to TFTP Server <<<<<<<<<".format(a_device['device_type']))
    # Push the switch startup-config to the TFTP server.
    net_connect.send_command("copy startup-config tftp [ip_server] pdy.sw.l2.-{0}-{1}".format(a_device['ip'], date))
    time.sleep(1)
    print("\n\n>>>>>>>>> Startup-Config Has Been Sent {0} <<<<<<<<<".format(a_device['ip']))
    print("\n\n----------------------------------------END----------------------------------------\n\n")
The 108 line is from net_connect = ConnectHandler(**a_device)
I am trying to use a gevent-based server with the remote datastore API, and I occasionally get
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/gevent/pywsgi.py", line 884, in handle_one_response
self.run_application()
File "/home/abhinavabcd_gmail_com/samosa-scripts/samosa_messaging_framework/geventwebsocket/handler.py", line 76, in run_ap
plication
self.run_websocket()
File "/home/abhinavabcd_gmail_com/samosa-scripts/samosa_messaging_framework/geventwebsocket/handler.py", line 52, in run_we
bsocket
self.application(self.environ, lambda s, h, e=None: [])
File "server.py", line 478, in __call__
current_app.handle()
File "/home/abhinavabcd_gmail_com/samosa-scripts/samosa_messaging_framework/geventwebsocket/resource.py", line 23, in handl
e
self.protocol.on_close()
File "/home/abhinavabcd_gmail_com/samosa-scripts/samosa_messaging_framework/geventwebsocket/protocols/base.py", line 14, in
on_close
self.app.on_close(reason)
File "server.py", line 520, in on_close
current_node.destroy_connection(self.ws)
File "server.py", line 428, in destroy_connection
db.remove_connection(conn.connection_id)
File "/home/abhinavabcd_gmail_com/samosa-scripts/samosa_messaging_framework/database_ndb.py", line 141, in remove_connectio
n
key.delete()
File "/home/abhinavabcd_gmail_com/google_appengine/google/appengine/ext/ndb/model.py", line 3451, in _put
return self._put_async(**ctx_options).get_result()
File "/home/abhinavabcd_gmail_com/google_appengine/google/appengine/ext/ndb/tasklets.py", line 383, in get_result
self.check_success()
File "/home/abhinavabcd_gmail_com/google_appengine/google/appengine/ext/ndb/tasklets.py", line 378, in check_success
self.wait()
File "/home/abhinavabcd_gmail_com/google_appengine/google/appengine/ext/ndb/tasklets.py", line 362, in wait
if not ev.run1():
File "/home/abhinavabcd_gmail_com/google_appengine/google/appengine/ext/ndb/eventloop.py", line 253, in run1
delay = self.run0()
File "/home/abhinavabcd_gmail_com/google_appengine/google/appengine/ext/ndb/eventloop.py", line 238, in run0
(rpc, self.rpcs))
RuntimeError: rpc <google.appengine.api.apiproxy_stub_map.UserRPC object at 0x7fb46e754a90> was not given to wait_any as a ch
oice {<google.appengine.api.apiproxy_stub_map.UserRPC object at 0x7fb46e779f90>: (<bound method Future._on_rpc_completion of
<Future 7fb46e756f10 created by run_queue(context.py:185) for tasklet _memcache_set_tasklet(context.py:1111); pending>>, (<go
ogle.appengine.api.apiproxy_stub_map.UserRPC object at 0x7fb46e779f90>, '', <google.appengine.datastore.datastore_rpc.Connect
ion object at 0x7fb46e874250>, <generator object _memcache_set_tasklet at 0x7fb46e79b9b0>), {}), <google.appengine.api.apipro
xy_stub_map.UserRPC object at 0x7fb46e75c690>: (<bound method Future._on_rpc_completion of <Future 7fb46e7c8950 created by ru
n_queue(context.py:185) for tasklet _memcache_set_tasklet(context.py:1111); pending>>, (<google.appengine.api.apiproxy_stub_m
ap.UserRPC object at 0x7fb46e75c690>, '', <google.appengine.datastore.datastore_rpc.Connection object at 0x7fb46e7c8ad0>, <ge
nerator object _memcache_set_tasklet at 0x7fb46e6f3640>), {})}
# monkey patched already
try:
    import dev_appserver
    dev_appserver.fix_sys_path()
except ImportError:
    print('Please make sure the App Engine SDK is in your PYTHONPATH.')
    raise

email = '----SERVICE_ACCOUNT_EMAIL-------'

# BUG FIX: the import and the ConfigureRemoteApiForOAuth() call were fused
# onto one line ("... import remote_api_stub remote_api_stub.Configure...")
# which is a SyntaxError — they are two separate statements.
from google.appengine.ext.remote_api import remote_api_stub

# Authenticate the remote datastore stub against the deployed app using the
# service account's .p12 key.
remote_api_stub.ConfigureRemoteApiForOAuth(
    '{}.appspot.com'.format("XXXPROJECT_IDXXX"),
    '/_ah/remote_api',
    service_account=email,
    key_file_path='KEY---1927925e9abc.p12')

import config
from google.appengine.ext import ndb
from lru_cache import LRUCache
It happens occasionally, with datastore write operations: put and delete.
Any pointers to understand/fix this appreciated.
I recently published a working scrape to scrapyd. I'm getting the error message below when I run the scrape.
I reviewed this closed issue: https://github.com/scrapy/scrapy/issues/86 and implemented the recommended fix per the docs: http://scrapyd.readthedocs.org/en/latest/config.html
My config file lives here: :/etc/scrapyd/conf.d/000-default
I cannot seem to disable the Feedexporter extension by setting items_dir to none. Is there a new way to disable feedexporter in scrapyd?
Config file:
[scrapyd]
http_port = 6800
debug = off
#max_proc = 1
eggs_dir = /var/lib/scrapyd/eggs
dbs_dir = /var/lib/scrapyd/dbs
items_dir =
logs_dir = /var/log/scrapyd
Error Message:
2014-10-12 06:29:15-0500 [jsc] ERROR: Error caught on signal handler: <bound method ?.item_scraped of <scrapy.contrib.feedexport.FeedExporter object at 0x7fc879ff82d0>>
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 577, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/usr/lib/pymodules/python2.7/scrapy/core/scraper.py", line 215, in _itemproc_finished
item=output, response=response, spider=spider)
File "/usr/lib/pymodules/python2.7/scrapy/signalmanager.py", line 23, in send_catch_log_deferred
return signal.send_catch_log_deferred(*a, **kw)
File "/usr/lib/pymodules/python2.7/scrapy/utils/signal.py", line 53, in send_catch_log_deferred
*arguments, **named)
--- <exception caught here> ---
File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 139, in maybeDeferred
result = f(*args, **kw)
File "/usr/lib/pymodules/python2.7/scrapy/xlib/pydispatch/robustapply.py", line 54, in robustApply
return receiver(*arguments, **named)
File "/usr/lib/pymodules/python2.7/scrapy/contrib/feedexport.py", line 190, in item_scraped
slot.exporter.export_item(item)
File "/usr/lib/pymodules/python2.7/scrapy/contrib/exporter/__init__.py", line 87, in export_item
itemdict = dict(self._get_serialized_fields(item))
File "/usr/lib/pymodules/python2.7/scrapy/contrib/exporter/__init__.py", line 71, in _get_serialized_fields
field = item.fields[field_name]
exceptions.AttributeError: 'dict' object has no attribute 'fields'
I was able to disable FeedExporter in scrapyd by adding the following line to settings.py
EXTENSIONS = {'scrapy.contrib.feedexport.FeedExporter': None}
Disabling FeedExporter solved the problem.