How can a service on an RPyC server refer to a specific client's service? - Python

CORE
The server part is the Core, which is responsible for registering modules and for the interaction between them. Core runs as a ThreadedServer. CoreService handles module registration. When a module registers, I store its connection so I can use it later. A module calls a function on the Core that is supposed to call another module in turn. But using the stored connections does not work: execution goes into an infinite loop.
import rpyc

class CoreService(rpyc.Service):
    __modules = {}

    def exposed_register_module(self, module_name):
        if module_name in self.__modules:
            return False
        self.__modules[module_name] = self._conn
        return True

    def exposed_execute_query_module(self, module_name, attribute_name, args):
        # THIS is the part I cannot get to work
        if module_name in self.__modules:
            self.__modules[module_name].root
            # return None
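For illustration, this is roughly what I am trying to achieve in exposed_execute_query_module (a sketch only; the getattr call on the stored connection's root is my assumption of how the remote module's exposed method would be invoked, not working code):

    def exposed_execute_query_module(self, module_name, attribute_name, args):
        # Sketch: look up the registered client's connection and call the
        # requested exposed attribute on its root service (assumption).
        if module_name not in self.__modules:
            return None
        remote_root = self.__modules[module_name].root
        return getattr(remote_root, attribute_name)(*args)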
Running the test
When I run the test, it hangs in a loop; after interrupting it with Ctrl+C I get the following output:
^CTraceback (most recent call last):
File "/home/kpv/perseus/control-lib/perseus_control_lib/module.py", line 67, in __getattr__
return self.__core_connector.root.execute_query_module(self.__proxy_module_name, name, args)
File "/usr/local/lib/python2.7/dist-packages/rpyc/core/netref.py", line 196, in __call__
return syncreq(_self, consts.HANDLE_CALL, args, kwargs)
File "/usr/local/lib/python2.7/dist-packages/rpyc/core/netref.py", line 71, in syncreq
return conn.sync_request(handler, oid, *args)
File "/usr/local/lib/python2.7/dist-packages/rpyc/core/protocol.py", line 438, in sync_request
self.serve(0.1)
File "/usr/local/lib/python2.7/dist-packages/rpyc/core/protocol.py", line 387, in serve
data = self._recv(timeout, wait_for_lock = True)
File "/usr/local/lib/python2.7/dist-packages/rpyc/core/protocol.py", line 344, in _recv
if self._channel.poll(timeout):
File "/usr/local/lib/python2.7/dist-packages/rpyc/core/channel.py", line 43, in poll
return self.stream.poll(timeout)
File "/usr/local/lib/python2.7/dist-packages/rpyc/core/stream.py", line 41, in poll
rl, _, _ = select([self], [], [], timeout)
KeyboardInterrupt

Related

How to get list of events using the Python kubernetes API?

I am trying to obtain the list of events from a minikube cluster using the Python Kubernetes API with the following code:
from kubernetes import config, client
config.load_kube_config()
api = client.EventsV1beta1Api()
print(api.list_event_for_all_namespaces())
I am getting the following error:
C:\Users\lameg\kubesense>python test.py
Traceback (most recent call last):
File "C:\Users\lameg\kubesense\test.py", line 6, in <module>
print(api.list_event_for_all_namespaces())
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api\events_v1beta1_api.py", line 651, in list_event_for_all_namespaces
return self.list_event_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api\events_v1beta1_api.py", line 758, in list_event_for_all_namespaces_with_http_info
return self.api_client.call_api(
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 348, in call_api
return self.__call_api(resource_path, method,
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 192, in __call_api
return_data = self.deserialize(response_data, response_type)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 264, in deserialize
return self.__deserialize(data, response_type)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 303, in __deserialize
return self.__deserialize_model(data, klass)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 639, in __deserialize_model
kwargs[attr] = self.__deserialize(value, attr_type)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 280, in __deserialize
return [self.__deserialize(sub_data, sub_kls)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 280, in <listcomp>
return [self.__deserialize(sub_data, sub_kls)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 303, in __deserialize
return self.__deserialize_model(data, klass)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\api_client.py", line 641, in __deserialize_model
instance = klass(**kwargs)
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\models\v1beta1_event.py", line 112, in __init__
self.event_time = event_time
File "C:\Users\lameg\miniforge3\lib\site-packages\kubernetes\client\models\v1beta1_event.py", line 291, in event_time
raise ValueError("Invalid value for `event_time`, must not be `None`") # noqa: E501
ValueError: Invalid value for `event_time`, must not be `None`
Any ideas?
That looks like either a bug in the Python client, or a bug in the OpenAPI specification used to generate the client: clearly, null is a value for eventTime that is supported by the API.
I think the only workaround is to monkey-patch the kubernetes.client module so that it accepts null values. Something like this:
from kubernetes import config, client

config.load_kube_config()
api = client.EventsV1beta1Api()

# This is a descriptor, see https://docs.python.org/3/howto/descriptor.html
class FakeEventTime:
    def __get__(self, obj, objtype=None):
        return obj._event_time

    def __set__(self, obj, value):
        obj._event_time = value

# Monkey-patch the `event_time` attribute of the V1beta1Event class.
client.V1beta1Event.event_time = FakeEventTime()

# Now this works.
events = api.list_event_for_all_namespaces()
The above code runs successfully against my OpenShift instance, whereas previously it would fail as you describe in your question.

Runtime Error when running a simple cuML code in a Dask environment

I'm trying to test a simple piece of code using two remote workers, but I don't know what is going on or what the error refers to.
The code is simple:
#!/usr/bin/python3
from cuml.dask.cluster import KMeans
from cuml.dask.datasets import make_blobs
from dask.distributed import Client
c = Client("dask-scheduler:8786")
centers = 5
X, _ = make_blobs(n_samples=10000, centers=centers)
k_means = KMeans(n_clusters=centers)
k_means.fit(X)
labels = k_means.predict(X)
It connects but when it tries to execute the cluster algorithm, it throws the following error:
Traceback (most recent call last):
File "test_cuml.py", line 15, in <module>
k_means.fit(X)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/cuml/common/memory_utils.py", line 93, in cupy_rmm_wrapper
return func(*args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/cuml/dask/cluster/kmeans.py", line 161, in fit
comms.init(workers=data.workers)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/cuml/raft/dask/common/comms.py", line 209, in init
wait=True,
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 2506, in run
return self.sync(self._run, function, *args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 869, in sync
self.loop, func, *args, callback_timeout=callback_timeout, **kwargs
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/utils.py", line 332, in sync
raise exc.with_traceback(tb)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/utils.py", line 315, in f
result[0] = yield future
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/tornado/gen.py", line 762, in run
value = future.result()
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/client.py", line 2443, in _run
raise exc.with_traceback(tb)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/cuml/raft/dask/common/comms.py", line 429, in _func_init_all
_func_init_nccl(sessionId, uniqueId)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/cuml/raft/dask/common/comms.py", line 484, in _func_init_nccl
n.init(nWorkers, uniqueId, wid)
File "cuml/raft/dask/common/nccl.pyx", line 151, in cuml.raft.dask.common.nccl.nccl.init
The workers are reporting this issue:
distributed.worker - INFO - Run out-of-band function '_func_init_all'
distributed.worker - WARNING - Run Failed
Function: _func_init_all
args: (b'\x95d$\x89\x9beI\xf5\xa7\x8c7M\xe8V[v', b'\x02\x00\xc8\xdd\x8fj\x07\x90\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', False, {'tcp://dask-scheduler:40439': {'rank': 0}, 'tcp://dask-scheduler:39645': {'rank': 1}}, False, 0)
kwargs: {}
Traceback (most recent call last):
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/distributed/worker.py", line 4553, in run
result = await function(*args, **kwargs)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/cuml/raft/dask/common/comms.py", line 429, in _func_init_all
_func_init_nccl(sessionId, uniqueId)
File "/opt/conda/envs/rapids/lib/python3.7/site-packages/cuml/raft/dask/common/comms.py", line 484, in _func_init_nccl
n.init(nWorkers, uniqueId, wid)
File "cuml/raft/dask/common/nccl.pyx", line 151, in cuml.raft.dask.common.nccl.nccl.init
RuntimeError: NCCL_ERROR: b'invalid usage'
Does anyone know what is happening or how to mitigate this? The error is not clear to me. I have tried several versions of RAPIDS. IMPORTANT: I'm running in a Docker environment sharing all GPUs (--gpus all) and the host network (--network host).

Error on writing to Google cloud spanner using Google cloud functions

I am trying to insert data into a Cloud Spanner table using Cloud Functions, but it throws the error given below. Reading data from Cloud Spanner works properly, but writing, with both DML statements and the batch.insert method, throws the same error. I am thinking it is some kind of permissions problem, but I don't know how to fix it.
Requirements file contains only google-cloud-spanner==1.7.1
Code running in cloud functions
import json
from google.cloud import spanner

INSTANCE_ID = 'AARISTA'
DATABASE_ID = 'main'
TABLE_NAME = 'userinfo'
dataDict = None

def new_user(request):
    dataDict = json.loads(request.data)  # Data is available in dict format
    if dataDict['USER_ID'] == None:
        return "User id empty"
    elif dataDict['IMEI'] == None:
        return "Imei number empty"
    elif dataDict['DEVICE_ID'] == None:
        return "Device ID empty"
    elif dataDict['NAME'] == None:
        return "Name field is empty"
    elif dataDict['VIRTUAL_PRIVATE_KEY'] == None:
        return "User's private key cant be empty"
    else:
        return insert_data(INSTANCE_ID, DATABASE_ID)

def insert_data(instance_id, database_id):
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)

    def insert_user(transcation):
        row_ct = transcation.execute_update(
            "INSERT userinfo (USER_ID,DEVICE_ID,IMEI,NAME,VIRTUAL_PRIVATE_KEY) VALUES "
            "(" + dataDict['USER_ID'] + ', ' + dataDict['DEVICE_ID'] + ', '
            + dataDict['IMEI'] + ', ' + dataDict['NAME'] + ', '
            + dataDict['VIRTUAL_PRIVATE_KEY'] + ")")

    database.run_in_transaction(insert_user)
    return 'Inserted data.'
Error logs on Cloud Functions
Traceback (most recent call last):
File "/env/local/lib/python3.7/site-packages/google/cloud/spanner_v1/pool.py", line 265, in get
session = self._sessions.get_nowait()
File "/opt/python3.7/lib/python3.7/queue.py", line 198, in get_nowait
return self.get(block=False)
File "/opt/python3.7/lib/python3.7/queue.py", line 167, in get
raise Empty
_queue.Empty
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/env/local/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 57, in error_remapped_callable
return callable_(*args, **kwargs)
File "/env/local/lib/python3.7/site-packages/grpc/_channel.py", line 547, in __call__
return _end_unary_response_blocking(state, call, False, None)
File "/env/local/lib/python3.7/site-packages/grpc/_channel.py", line 466, in _end_unary_response_blocking
raise _Rendezvous(state, None, None, deadline)
grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with: status = StatusCode.INVALID_ARGUMENT details = "Invalid CreateSession request." debug_error_string = "{"created":"#1547373361.398535906","description":"Error received from peer","file":"src/core/lib/surface/call.cc","file_line":1036,"grpc_message":"Invalid CreateSession request.","grpc_status":3}" >
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 297, in run_http_function
result = _function_handler.invoke_user_function(flask.request)
File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 199, in invoke_user_function
return call_user_function(request_or_event)
File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 192, in call_user_function
return self._user_function(request_or_event)
File "/user_code/main.py", line 21, in new_user
return insert_data(INSTANCE_ID,DATABASE_ID)
File "/user_code/main.py", line 31, in insert_data
database.run_in_transaction(insert_user)
File "/env/local/lib/python3.7/site-packages/google/cloud/spanner_v1/database.py", line 438, in run_in_transaction
with SessionCheckout(self._pool) as session:
File "/env/local/lib/python3.7/site-packages/google/cloud/spanner_v1/pool.py", line 519, in __enter__
self._session = self._pool.get(**self._kwargs)
File "/env/local/lib/python3.7/site-packages/google/cloud/spanner_v1/pool.py", line 268, in get
session.create()
File "/env/local/lib/python3.7/site-packages/google/cloud/spanner_v1/session.py", line 116, in create
session_pb = api.create_session(self._database.name, metadata=metadata, **kw)
File "/env/local/lib/python3.7/site-packages/google/cloud/spanner_v1/gapic/spanner_client.py", line 276, in create_session
request, retry=retry, timeout=timeout, metadata=metadata
File "/env/local/lib/python3.7/site-packages/google/api_core/gapic_v1/method.py", line 143, in __call__
return wrapped_func(*args, **kwargs)
File "/env/local/lib/python3.7/site-packages/google/api_core/retry.py", line 270, in retry_wrapped_func
on_error=on_error,
File "/env/local/lib/python3.7/site-packages/google/api_core/retry.py", line 179, in retry_target
return target()
File "/env/local/lib/python3.7/site-packages/google/api_core/timeout.py", line 214, in func_with_timeout
return func(*args, **kwargs)
File "/env/local/lib/python3.7/site-packages/google/api_core/grpc_helpers.py", line 59, in error_remapped_callable
six.raise_from(exceptions.from_grpc_error(exc), exc)
File "<string>", line 3, in raise_from
google.api_core.exceptions.InvalidArgument: 400 Invalid CreateSession request.
I tried to reproduce this but it seems to work for me as a Python 3.7 function. I used the latest google-cloud-spanner library in requirements.txt.
While I am unsure what would be causing your error, I did notice a few other things.
It seemed odd to declare a global dataDict and then not use the one you construct by passing it along. Instead, I added it as a parameter to the insert method.
The spacing of the query and the mix of single and double quotes made it hard to parse visually. Since the function runs on Python 3.7, you can also use f-strings, which would likely make it even more readable.
Here is the code I ran in a function that seemed to work.
import json
from google.cloud import spanner

INSTANCE_ID = 'testinstance'
DATABASE_ID = 'testdatabase'
TABLE_ID = 'userinfo'

def new_user(request):
    data = {'USER_ID': '10', 'DEVICE_ID': '11'}
    return insert_data(INSTANCE_ID, DATABASE_ID, data)

def insert_data(instance_id, database_id, data):
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)

    def insert_user(transaction):
        query = f"INSERT {TABLE_ID} (USER_ID,DEVICE_ID) VALUES ({data['USER_ID']},{data['DEVICE_ID']})"
        row_ct = transaction.execute_update(query)

    database.run_in_transaction(insert_user)
    return 'Inserted data.'
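If you want to keep taking the payload from the request as in the original function, a minimal sketch (assuming the request body is JSON, as implied by the original json.loads(request.data) call, and reusing the constants and insert_data defined above) could look like this:

def new_user(request):
    # Assumption: the Cloud Function receives a JSON body, as in the original code.
    data = json.loads(request.data)
    return insert_data(INSTANCE_ID, DATABASE_ID, data)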

Python - RuntimeError: working outside of request context

I'm trying to get the GET parameters from the URL. I have it working in my __init__.py file, but in a different file it's not working.
I tried using with app.app_context():, but I am still getting the same issue.
def log_entry(entity, type, entity_id, data, error):
    with app.app_context():
        zip_id = request.args.get('id')
RuntimeError: working outside of request context
Any suggestions?
Additional Info:
This is using Flask web framework which is setup as a service (API).
Example URL the user would hit http://website.com/api/endpoint?id=1
As mentioned above, zip_id = request.args.get('id') works fine in the main file, but here I am in runners.py (just another file containing function definitions).
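For reference, the working pattern in the main file looks roughly like this (a minimal sketch; the app setup and route name are illustrative, not my actual code):

from flask import Flask, request

app = Flask(__name__)

@app.route('/api/endpoint')
def endpoint():
    # Inside a view function a request context is active, so this works.
    zip_id = request.args.get('id')
    return zip_id or ''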
Full traceback:
Debugging middleware caught exception in streamed response at a point where response headers were already sent.
Traceback (most recent call last):
File "/Users/ereeve/.virtualenvs/pi-automation-api/lib/python2.7/site-packages/werkzeug/wsgi.py", line 703, in __next__
return self._next()
File "/Users/ereeve/.virtualenvs/pi-automation-api/lib/python2.7/site-packages/werkzeug/wrappers.py", line 81, in _iter_encoded
for item in iterable:
File "/Users/ereeve/Documents/TechSol/pi-automation-api/automation_api/runners.py", line 341, in create_agencies
log_entry("test", "created", 1, "{'data':'hey'}", "")
File "/Users/ereeve/Documents/TechSol/pi-automation-api/automation_api/runners.py", line 315, in log_entry
zip_id = request.args.get('id')
File "/Users/ereeve/.virtualenvs/pi-automation-api/lib/python2.7/site-packages/werkzeug/local.py", line 343, in __getattr__
return getattr(self._get_current_object(), name)
File "/Users/ereeve/.virtualenvs/pi-automation-api/lib/python2.7/site-packages/werkzeug/local.py", line 302, in _get_current_object
return self.__local()
File "/Users/ereeve/.virtualenvs/pi-automation-api/lib/python2.7/site-packages/flask/globals.py", line 20, in _lookup_req_object
raise RuntimeError('working outside of request context')
RuntimeError: working outside of request context
The function in the same file that calls log_entry:
def create_agencies(country_code, DB, session):
    document = DB.find_one({'rb_account_id': RB_COUNTRIES_new[country_code]['rb_account_id']})
    t2 = new_t2(session)
    log_entry("test", "created", 1, "{'data':'hey'}", "")

python multiprocessing mssql cursor

Is there any way to pool connections or share a connection across multiple processes?
I am trying to use one connection across multiple processes. Here is the code (running on Python 2.7, pyodbc).
# Import custom python packages
import pathos.multiprocessing as mp
import pyodbc

class MyManagerClass(object):
    def __init__(self):
        self.conn = None
        self.result = []

    def connect_to_db(self):
        conn = pyodbc.connect("DSN=cpmeast;UID=dntcore;PWD=dntcorevs2")
        cursor = conn.cursor()
        self.conn = conn
        return cursor

    def read_data(self, *args):
        cursor = args[0][0]
        data = args[0][1]
        print 'Running query'
        cursor.execute("WAITFOR DELAY '00:00:02';select GETDATE(), '"+data+"';")
        self.result.append(cursor.fetchall())

def read_data(*args):
    print 'Running query', args
    # cursor.execute("WAITFOR DELAY '00:00:02';select GETDATE(), '"+data+"';")

def main():
    dbm = MyManagerClass()
    conn = pyodbc.connect("DSN=cpmeast;UID=dntcore;PWD=dntcorevs2")
    cursor = conn.cursor()

    pool = mp.ProcessingPool(4)
    for i in pool.imap(dbm.read_data, ((cursor, 'foo'), (cursor, 'bar'))):
        print i
    pool.close()
    pool.join()

    cursor.close();
    dbm.conn.close()
    print 'Result', dbm.result
    print 'Closed'

if __name__ == '__main__':
    main()
I am getting the following error:
Process PoolWorker-1:
Traceback (most recent call last):
File "/home/amit/envs/py_env_clink/lib/python2.7/site-packages/processing/process.py", line 227, in _bootstrap
self.run()
File "/home/amit/envs/py_env_clink/lib/python2.7/site-packages/processing/process.py", line 85, in run
self._target(*self._args, **self._kwargs)
File "/home/amit/envs/py_env_clink/lib/python2.7/site-packages/processing/pool.py", line 54, in worker
for job, i, func, args, kwds in iter(inqueue.get, None):
File "/home/amit/envs/py_env_clink/lib/python2.7/site-packages/processing/queue.py", line 327, in get
return recv()
File "/home/amit/envs/py_env_clink/lib/python2.7/site-packages/dill-0.2.4-py2.7.egg/dill/dill.py", line 209, in loads
return load(file)
File "/home/amit/envs/py_env_clink/lib/python2.7/site-packages/dill-0.2.4-py2.7.egg/dill/dill.py", line 199, in load
obj = pik.load()
File "/home/amit/envs/py_env_clink/lib/python2.7/pickle.py", line 858, in load
dispatch[key](self)
File "/home/amit/envs/py_env_clink/lib/python2.7/pickle.py", line 1083, in load_newobj
obj = cls.__new__(cls, *args)
TypeError: object.__new__(pyodbc.Cursor) is not safe, use pyodbc.Cursor.__new__()
Process PoolWorker-2:
Traceback (most recent call last):
File "/home/amit/envs/py_env_clink/lib/python2.7/site-packages/processing/process.py", line 227, in _bootstrap
self.run()
File "/home/amit/envs/py_env_clink/lib/python2.7/site-packages/processing/process.py", line 85, in run
self._target(*self._args, **self._kwargs)
File "/home/amit/envs/py_env_clink/lib/python2.7/site-packages/processing/pool.py", line 54, in worker
for job, i, func, args, kwds in iter(inqueue.get, None):
File "/home/amit/envs/py_env_clink/lib/python2.7/site-packages/processing/queue.py", line 327, in get
return recv()
File "/home/amit/envs/py_env_clink/lib/python2.7/site-packages/dill-0.2.4-py2.7.egg/dill/dill.py", line 209, in loads
return load(file)
File "/home/amit/envs/py_env_clink/lib/python2.7/site-packages/dill-0.2.4-py2.7.egg/dill/dill.py", line 199, in load
obj = pik.load()
File "/home/amit/envs/py_env_clink/lib/python2.7/pickle.py", line 858, in load
dispatch[key](self)
File "/home/amit/envs/py_env_clink/lib/python2.7/pickle.py", line 1083, in load_newobj
obj = cls.__new__(cls, *args)
TypeError: object.__new__(pyodbc.Cursor) is not safe, use pyodbc.Cursor.__new__()
The problem is with the Pickle stage. Pickle doesn't know inherently how to serialize a connection. Consider:
import pickle
import pymssql

a = {'hello': 'world'}

server = 'server'
username = 'username'
password = 'password'
database = 'database'
conn = pymssql.connect(host=server, user=username, password=password, database=database)

with open('filename.pickle', 'wb') as handle:
    pickle.dump(conn, handle, protocol=pickle.HIGHEST_PROTOCOL)

with open('filename.pickle', 'rb') as handle:
    b = pickle.load(handle)

print(a == b)
This results in the following error message:
Traceback (most recent call last):
File "pickle_ex.py", line 10, in <module>
pickle.dump(conn, handle, protocol=pickle.HIGHEST_PROTOCOL)
File "stringsource", line 2, in _mssql.MSSQLConnection.__reduce_cython__
TypeError: no default __reduce__ due to non-trivial __cinit__
But if you replace conn with a in pickle.dump, the code will run and print out True.
You may be able to define a custom __reduce__ method in your class, but I wouldn't try it: it would effectively make temp tables act like global temp tables that are only accessible across these processes, which shouldn't be allowed to happen anyway.
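A more common approach, not from the original post and only a sketch (it reuses the DSN and query from the question and is untested), is to open a separate connection inside each worker so that nothing un-picklable ever has to cross the process boundary:

import pathos.multiprocessing as mp
import pyodbc

def read_data(data):
    # Each worker opens and closes its own connection and cursor,
    # so only plain strings and tuples are pickled between processes.
    conn = pyodbc.connect("DSN=cpmeast;UID=dntcore;PWD=dntcorevs2")
    cursor = conn.cursor()
    cursor.execute("WAITFOR DELAY '00:00:02';select GETDATE(), '" + data + "';")
    rows = [tuple(row) for row in cursor.fetchall()]  # convert rows to picklable tuples
    cursor.close()
    conn.close()
    return rows

if __name__ == '__main__':
    pool = mp.ProcessingPool(4)
    results = pool.map(read_data, ['foo', 'bar'])
    print(results)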
Links:
My pickle code is from here: How can I use pickle to save a dict?
