Fault: No Attribute in xmlrpclib.py

I'm working on an application in which a server and a client are being created; the ServerAPI uses SimpleXMLRPCServer and the ClientAPI uses xmlrpclib. The client is initialized with:
class w_Client:
    def __init__(self, ServerIP, ServerPort, ClientIP):
        self.conn = xmlrpclib.ServerProxy("http://" + ServerIP + ":" + str(ServerPort))
        self.ClientIP = ClientIP
Upon a button being pressed in the application, an XML specification file is created and passed through:
def Create(self, XMLstring):
    return self.conn.Create(XMLstring, self.ClientIP)
I've already checked to make sure that the XMLstring is valid XML; however, when I press the button, I get the following error:
Traceback (most recent call last):
File "/home/app/UI/MainWindow.py", line 461, in compile
xmlFile = compiler.compile()
File "/home/app/Core/Compiler.py", line 75, in compile
self.compile_top()
File "/home/app/Core/Compiler.py", line 354, in compile_top
status = mainWidgets["w_client"].Create(xmlString)
File "/home/app/Wireless/ClientAPI.py", line 12, in Create
return self.conn.Create(XMLstring, self.ClientIP)
File "/usr/lib/python2.7/xmlrpclib.py", line 1233, in __call__
return self.__send(self.__name, args)
File "/usr/lib/python2.7/xmlrpclib.py", line 1591, in __request
verbose=self.__verbose
File "/usr/lib/python2.7/xmlrpclib.py", line 1273, in request
return self.single_request(host, handler, request_body, verbose)
File "/usr/lib/python2.7/xmlrpclib.py", line 1306, in single_request
return self.parse_response(response)
File "/usr/lib/python2.7/xmlrpclib.py", line 1482, in parse_response
return u.close()
File "/usr/lib/python2.7/xmlrpclib.py", line 794, in close
raise Fault(**self._stack[0])
xmlrpclib.Fault: <Fault 1: "<type 'exceptions.TypeError'>:'NoneType' object has no attribute '__getitem__'">
I've also made sure that the ClientIP is passed correctly. Otherwise, I'm not entirely sure what's going on or how to even go about fixing it.

<type 'exceptions.TypeError'>:'NoneType' object has no attribute '__getitem__'
This exception may have been generated by the xmlrpc method you were calling (i.e. server side).
I suggest that you add verbose=True to your instantiation of the server proxy:
xmlrpclib.ServerProxy("http://" + ServerIP + ":" + str(ServerPort), verbose=True)
This will allow you to see what you're sending and receiving.
It seems the method you're calling expects a dict: the server-side handler is doing item access on a value that turned out to be None, and that TypeError is what comes back wrapped in the Fault.
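For reference, here is a minimal sketch of a server-side handler that would produce this exact Fault, plus a guard against it. This is hypothetical code (Python 2.7, to match the question); lookup_client stands in for whatever the real server does with ClientIP and is not from the original application.

from SimpleXMLRPCServer import SimpleXMLRPCServer

def lookup_client(client_ip):
    # Hypothetical helper: returns None for an unknown client.
    return None

def Create(XMLstring, ClientIP):
    record = lookup_client(ClientIP)
    if record is None:
        # Without this guard, record["status"] raises
        # TypeError: 'NoneType' object has no attribute '__getitem__',
        # which SimpleXMLRPCServer sends back as the Fault seen above.
        return "Unknown client: " + ClientIP
    return record["status"]

server = SimpleXMLRPCServer(("0.0.0.0", 8000))
server.register_function(Create, "Create")
server.serve_forever()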

Related

Server Selection Timeout Error, MongoDB Change Streams

I'm trying to test an insert into a Mongo change stream with the following code. It is only these two lines:
client = pymongo.MongoClient(CONNECTION_STR)
print(client.changestream.collection.insert_one({"hello": "world"}).inserted_id)
The error I get when I run this Python file is as follows:
Traceback (most recent call last):
File "D:\PyMongo Change Streams\test.py", line 7, in <module>
print(client.changestream.collection.insert_one({"hello": "world"}).inserted_id)
File "D:\PyMongo Change Streams\myvenv\lib\site-packages\pymongo\collection.py", line 697, in insert_one
File "C:\Users\User\AppData\Local\Programs\Python\Python310\lib\contextlib.py", line 135, in __enter__
return next(self.gen)
File "D:\PyMongo Change Streams\myvenv\lib\site-packages\pymongo\mongo_client.py", line 1611, in _tmp_session
s = self._ensure_session(session)
File "D:\PyMongo Change Streams\myvenv\lib\site-packages\pymongo\mongo_client.py", line 1598, in _ensure_session
return self.__start_session(True, causal_consistency=False)
File "D:\PyMongo Change Streams\myvenv\lib\site-packages\pymongo\mongo_client.py", line 1551, in __start_session
server_session = self._get_server_session()
File "D:\PyMongo Change Streams\myvenv\lib\site-packages\pymongo\mongo_client.py", line 1584, in _get_server_session
return self._topology.get_server_session()
File "D:\PyMongo Change Streams\myvenv\lib\site-packages\pymongo\topology.py", line 431, in get_server_session
self._select_servers_loop(
File "D:\PyMongo Change Streams\myvenv\lib\site-packages\pymongo\topology.py", line 199, in _select_servers_loop
raise ServerSelectionTimeoutError(
pymongo.errors.ServerSelectionTimeoutError: PY_SSIZE_T_CLEAN macro must be defined for '#' formats,PY_SSIZE_T_CLEAN macro must be defined for '#' formats,PY_SSIZE_T_CLEAN macro must be defined for '#' formats
I've also tested the connection with the following code, and it works well and prints out the db info.
try:
    client = pymongo.MongoClient(CONNECTION_STR)
    db = client.API
    print(db)
except Exception as e:
    print(e)
Can anyone please help me understand this error? It's my very first time working with MongoDB change streams. Thank you in advance!
I just checked the packages' versions and fixed this.
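For anyone hitting the same message: the PY_SSIZE_T_CLEAN errors typically come from running an older pymongo/bson build under a newer interpreter (Python 3.10 made that macro mandatory for C extensions), so checking versions is exactly the right move. A small sketch for inspecting what the virtualenv is actually using:

import sys
import pymongo

print(sys.version)      # interpreter the virtualenv is really running
print(pymongo.version)  # installed driver version
print(pymongo.has_c())  # True if the bson/pymongo C extensions loaded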

How to get list of events using the Python kubernetes API?

I am trying to obtain the list of events from a minikube cluster using the Python Kubernetes API with the following code:
from kubernetes import config, client
config.load_kube_config()
api = client.EventsV1beta1Api()
print(api.list_event_for_all_namespaces())
I am getting the following error:
C:\Users\lameg\kubesense>python test.py
Traceback (most recent call last):
File "C:\Users\lameg\kubesense\test.py", line 6, in <module>
print(api.list_event_for_all_namespaces())
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api\events_v1beta1_api.py", line 651, in list_event_for_all_namespaces
return self.list_event_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api\events_v1beta1_api.py", line 758, in list_event_for_all_namespaces_with_http_info
return self.api_client.call_api(
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 348, in call_api
return self.__call_api(resource_path, method,
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 192, in __call_api
return_data = self.deserialize(response_data, response_type)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 264, in deserialize
return self.__deserialize(data, response_type)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 303, in __deserialize
return self.__deserialize_model(data, klass)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 639, in __deserialize_model
kwargs[attr] = self.__deserialize(value, attr_type)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 280, in __deserialize
return [self.__deserialize(sub_data, sub_kls)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 280, in <listcomp>
return [self.__deserialize(sub_data, sub_kls)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 303, in __deserialize
return self.__deserialize_model(data, klass)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 641, in __deserialize_model
instance = klass(**kwargs)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\models\v1beta1_event.py", line 112, in __init__
self.event_time = event_time
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\models\v1beta1_event.py", line 291, in event_time
raise ValueError("Invalid value for `event_time`, must not be `None`") # noqa: E501
ValueError: Invalid value for `event_time`, must not be `None`
Any ideas?
That looks like either a bug in the Python client, or a bug in the OpenAPI specification used to generate the client: clearly, null is a value for eventTime that is supported by the API.
I think the only workaround is to monkey-patch the kubernetes.client module so that it accepts null values. Something like this:
from kubernetes import config, client

config.load_kube_config()
api = client.EventsV1beta1Api()

# This is a descriptor, see https://docs.python.org/3/howto/descriptor.html
class FakeEventTime:
    def __get__(self, obj, objtype=None):
        return obj._event_time

    def __set__(self, obj, value):
        obj._event_time = value

# Monkey-patch the `event_time` attribute of the V1beta1Event class.
client.V1beta1Event.event_time = FakeEventTime()

# Now this works.
events = api.list_event_for_all_namespaces()
The above code runs successfully against my OpenShift instance, whereas previously it would fail as you describe in your question.
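For illustration, once the patched call succeeds the return value is a V1beta1EventList, so iterating it looks like the sketch below (field names taken from the generated client models; adjust as needed):

events = api.list_event_for_all_namespaces()
for event in events.items:
    # Each item is a V1beta1Event; reason and note describe what happened.
    print(event.metadata.namespace, event.metadata.name, event.reason)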

Error on writing to Google cloud spanner using Google cloud functions

I am trying to insert data into a Cloud Spanner table using Cloud Functions, but it throws the error given below. Reading data from Cloud Spanner works properly, but writing, using both DML statements and the batch.insert method, throws the same error. I am thinking it's some kind of permissions problem, but I don't know how to fix it.
The requirements file contains only google-cloud-spanner==1.7.1.
Code running in Cloud Functions:
import json
from google.cloud import spanner

INSTANCE_ID = 'AARISTA'
DATABASE_ID = 'main'
TABLE_NAME = 'userinfo'
dataDict = None

def new_user(request):
    dataDict = json.loads(request.data)  # Data is available in dict format
    if dataDict['USER_ID'] == None:
        return "User id empty"
    elif dataDict['IMEI'] == None:
        return "Imei number empty"
    elif dataDict['DEVICE_ID'] == None:
        return "Device ID empty"
    elif dataDict['NAME'] == None:
        return "Name field is empty"
    elif dataDict['VIRTUAL_PRIVATE_KEY'] == None:
        return "User's private key cant be empty"
    else:
        return insert_data(INSTANCE_ID, DATABASE_ID)

def insert_data(instance_id, database_id):
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)

    def insert_user(transcation):
        row_ct = transcation.execute_update(
            "INSERT userinfo (USER_ID,DEVICE_ID,IMEI,NAME,VIRTUAL_PRIVATE_KEY) VALUES"
            "(" + dataDict['USER_ID'] + ', ' + dataDict['DEVICE_ID'] + ', '
            + dataDict['IMEI'] + ', ' + dataDict['NAME'] + ', '
            + dataDict['VIRTUAL_PRIVATE_KEY'] + ")")

    database.run_in_transaction(insert_user)
    return 'Inserted data.'
Error logs on Cloud Functions
Traceback (most recent call last):
File "/env/local/lib/python3.7/site-packages/google/cloud/spanner_v1/pool.py", line 265, in get session = self._sessions.get_nowait()
File "/opt/python3.7/lib/python3.7/queue.py", line 198, in get_nowait return self.get(block=False)
File "/opt/python3.7/lib/python3.7/queue.py", line 167, in get raise Empty _queue.Empty
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/env/local/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 57, in error_remapped_callable return callable_(*args, **kwargs)
File "/env/local/lib/python3.7/site-packages/grpc/_channel.py", line 547, in __call__ return _end_unary_response_blocking(state, call, False, None)
File "/env/local/lib/python3.7/site-packages/grpc/_channel.py", line 466, in _end_unary_response_blocking raise _Rendezvous(state, None, None, deadline)
grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with: status = StatusCode.INVALID_ARGUMENT details = "Invalid CreateSession request." debug_error_string = "{"created":"#1547373361.398535906","description":"Error received from peer","file":"src/core/lib/surface/call.cc","file_line":1036,"grpc_message":"Invalid> CreateSession request.","grpc_status":3}" >
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 297, in run_http_function result = _function_handler.invoke_user_function(flask.request)
File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 199, in invoke_user_function return call_user_function(request_or_event)
File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 192, in call_user_function return self._user_function(request_or_event)
File "/user_code/main.py", line 21, in new_user return insert_data(INSTANCE_ID,DATABASE_ID)
File "/user_code/main.py", line 31, in insert_data database.run_in_transaction(insert_user)
File "/env/local/lib/python3.7/site-packages/google/cloud/spanner_v1/database.py", line 438, in run_in_transaction with SessionCheckout(self._pool) as session:
File "/env/local/lib/python3.7/site-packages/google/cloud/spanner_v1/pool.py", line 519, in __enter__ self._session = self._pool.get(**self._kwargs)
File "/env/local/lib/python3.7/site-packages/google/cloud/spanner_v1/pool.py", line 268, in get session.create()
File "/env/local/lib/python3.7/site-packages/google/cloud/spanner_v1/session.py", line 116, in create session_pb = api.create_session(self._database.name, metadata=metadata, **kw)
File "/env/local/lib/python3.7/site-packages/google/cloud/spanner_v1/gapic/spanner_client.py", line 276, in create_session request, retry=retry, timeout=timeout, metadata=metadata
File "/env/local/lib/python3.7/site-packages/google/api_core/gapic_v1/method.py", line 143, in __call__ return wrapped_func(*args, **kwargs)
File "/env/local/lib/python3.7/site-packages/google/api_core/retry.py", line 270, in retry_wrapped_func on_error=on_error,
File "/env/local/lib/python3.7/site-packages/google/api_core/retry.py", line 179, in retry_target return target()
File "/env/local/lib/python3.7/site-packages/google/api_core/timeout.py", line 214, in func_with_timeout return func(*args, **kwargs)
File "/env/local/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 59, in error_remapped_callable six.raise_from(exceptions.from_grpc_error(exc), exc)
File "<string>", line 3, in raise_from
google.api_core.exceptions.InvalidArgument: 400 Invalid CreateSession request.
I tried to reproduce this but it seems to work for me as a Python 3.7 function. I used the latest google-cloud-spanner library in requirements.txt.
While I am unsure what is causing your error, I did notice a few other things.
It seemed odd to declare a global dataDict and not use the one you construct; instead, I pass the data as a parameter to the insert method.
The spacing of the query was a bit odd, and the mix of single and double quotes made it hard to parse visually. Since the function runs on Python 3.7, you can also use f-strings, which would likely make it even more readable.
Here is the code I ran in a function that seemed to work.
import json
from google.cloud import spanner

INSTANCE_ID = 'testinstance'
DATABASE_ID = 'testdatabase'
TABLE_ID = 'userinfo'

def new_user(request):
    data = {'USER_ID': '10', 'DEVICE_ID': '11'}
    return insert_data(INSTANCE_ID, DATABASE_ID, data)

def insert_data(instance_id, database_id, data):
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)

    def insert_user(transaction):
        query = f"INSERT {TABLE_ID} (USER_ID,DEVICE_ID) VALUES ({data['USER_ID']},{data['DEVICE_ID']})"
        row_ct = transaction.execute_update(query)

    database.run_in_transaction(insert_user)
    return 'Inserted data.'
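One further note, not from the original answer but worth sketching: execute_update also accepts params and param_types, which avoids building the VALUES clause by string concatenation (and the quoting problems that come with it). The column names and STRING types below are assumptions based on the question:

from google.cloud import spanner

def insert_user(transaction, data):
    transaction.execute_update(
        "INSERT userinfo (USER_ID, DEVICE_ID) VALUES (@user_id, @device_id)",
        params={"user_id": data["USER_ID"], "device_id": data["DEVICE_ID"]},
        param_types={"user_id": spanner.param_types.STRING,
                     "device_id": spanner.param_types.STRING},
    )

# run_in_transaction forwards extra positional arguments to the callback:
# database.run_in_transaction(insert_user, data)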

Python - RuntimeError: working outside of request context

I'm trying to get the GET parameters from the URL. I have it working in my __init__.py file, but in a different file it's not working.
I tried to use with app.app_context(): but I am still getting the same issue.
def log_entry(entity, type, entity_id, data, error):
    with app.app_context():
        zip_id = request.args.get('id')
RuntimeError: working outside of request context
Any suggestions?
Additional Info:
This is using Flask web framework which is setup as a service (API).
Example URL the user would hit http://website.com/api/endpoint?id=1
As mentioned above, `zip_id = request.args.get('id')` works fine in the main file, but here I am in runners.py (just another file with function definitions in it).
Full traceback:
Debugging middleware caught exception in streamed response at a point where response headers were already sent.
Traceback (most recent call last):
File "/Users/ereeve/.virtualenvs/pi-automation-api/lib/python2.7/site-packages/werkzeug/wsgi.py", line 703, in __next__
return self._next()
File "/Users/ereeve/.virtualenvs/pi-automation-api/lib/python2.7/site-packages/werkzeug/wrappers.py", line 81, in _iter_encoded
for item in iterable:
File "/Users/ereeve/Documents/TechSol/pi-automation-api/automation_api/runners.py", line 341, in create_agencies
log_entry("test", "created", 1, "{'data':'hey'}", "")
File "/Users/ereeve/Documents/TechSol/pi-automation-api/automation_api/runners.py", line 315, in log_entry
zip_id = request.args.get('id')
File "/Users/ereeve/.virtualenvs/pi-automation-api/lib/python2.7/site-packages/werkzeug/local.py", line 343, in __getattr__
return getattr(self._get_current_object(), name)
File "/Users/ereeve/.virtualenvs/pi-automation-api/lib/python2.7/site-packages/werkzeug/local.py", line 302, in _get_current_object
return self.__local()
File "/Users/ereeve/.virtualenvs/pi-automation-api/lib/python2.7/site-packages/flask/globals.py", line 20, in _lookup_req_object
raise RuntimeError('working outside of request context')
RuntimeError: working outside of request context
The function in the same file that calls the log_entry function:
def create_agencies(country_code, DB, session):
    document = DB.find_one({'rb_account_id': RB_COUNTRIES_new[country_code]['rb_account_id']})
    t2 = new_t2(session)
    log_entry("test", "created", 1, "{'data':'hey'}", "")

py2neo (Neo4j) : py2neo.packages.httpstream.http.SocketError: Operation not permitted

I am running Neo4j 2.2.1 on an Ubuntu Amazon EC2 instance. When I try to connect through Python using py2neo 2.0.7, I get the following error:
py2neo.packages.httpstream.http.SocketError: Operation not permitted
I am able to access the web-interface through http://52.10.**.***:7474/browser/
Code:
from py2neo import Graph, watch, Node, Relationship

url_graph_conn = "https://neo4j:password@52.10.**.***:7474/db/data/"
print url_graph_conn
my_conn = Graph(url_graph_conn)
babynames = my_conn.find("BabyName")
for babyname in babynames:
    print 2
Error message:
https://neo4j:password@52.10.**.***:7474/db/data/
Traceback (most recent call last):
File "C:\Users\rharoon002\eclipse_workspace\peace\peace\core\graphconnection.py", line 39, in <module>
for babyname in babynames:
File "C:\Python27\lib\site-packages\py2neo\core.py", line 770, in find
response = self.cypher.post(statement, parameters)
File "C:\Python27\lib\site-packages\py2neo\core.py", line 667, in cypher
metadata = self.resource.metadata
File "C:\Python27\lib\site-packages\py2neo\core.py", line 213, in metadata
self.get()
File "C:\Python27\lib\site-packages\py2neo\core.py", line 258, in get
response = self.__base.get(headers=headers, redirect_limit=redirect_limit, **kwargs)
File "C:\Python27\lib\site-packages\py2neo\packages\httpstream\http.py", line 966, in get
return self.__get_or_head("GET", if_modified_since, headers, redirect_limit, **kwargs)
File "C:\Python27\lib\site-packages\py2neo\packages\httpstream\http.py", line 943, in __get_or_head
return rq.submit(redirect_limit=redirect_limit, **kwargs)
File "C:\Python27\lib\site-packages\py2neo\packages\httpstream\http.py", line 433, in submit
http, rs = submit(self.method, uri, self.body, self.headers)
File "C:\Python27\lib\site-packages\py2neo\packages\httpstream\http.py", line 362, in submit
raise SocketError(code, description, host_port=uri.host_port)
py2neo.packages.httpstream.http.SocketError: Operation not permitted
You are trying to access neo4j via https on the standard port for http (7474):
url_graph_conn = "https://neo4j:password#52.10.**.***:7474/db/data/"
The standard port for a https connection is 7473. Try:
url_graph_conn = "https://neo4j:password#52.10.**.***:7473/db/data/"
And make sure you can access the web interface via https:
https://52.10.**.***:7473/browser/
You can change/see the port settings in your neo4j-server.properties file.
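As a sketch of the alternative (assuming the default Neo4j 2.2 connector settings), you can instead keep port 7474 and connect over plain http; only the scheme/port pair changes:

from py2neo import Graph

# http uses port 7474 (the default) and https uses 7473; pick a matching scheme and port.
my_conn = Graph("http://neo4j:password@52.10.**.***:7474/db/data/")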
