We have a db_connection.py module which looks like this:
from os import getenv
from collections import OrderedDict
import asyncpg
from tornado import gen
from util.utils import custom_exception
import time
db_name = getenv("DB_NAME", "XXX")
db_user = getenv("DB_USER", "YYY")
db_password = getenv("DB_PASSWORD", "ZZZ")
db_host = getenv("DB_HOST", "localhost")
db_port = getenv("DB_PORT", "5432")
db_args = dict(user=db_user, password=db_password,
database=db_name, host=db_host, port=db_port)
class db_connection(object):
    def __init__(self, **db_args):
        self.db_pool = []
        self.init(**db_args)

    # create a pool ref coroutine where we can get connections from
    # needs user, password, database, host, port
    @gen.coroutine
    def init(self, **db_args):
        self.db_pool = yield asyncpg.create_pool(**db_args)

    @gen.coroutine
    def exit(self):
        yield self.db_pool.close()

    # run a query
    async def run(self, q):
        if not self.db_pool:
            self.init()  # try again
        if not self.db_pool:
            raise custom_exception(reason='Database connection error', status_code=500)
        async with self.db_pool.acquire() as connection:
            ret = await connection.fetch(q)
        # format to match the old data types
        return [OrderedDict(e) for e in ret]

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.exit()
        self.db_pool = []
In the app, we initialize the db_connection object as follows:
from util.db_connection import db_args, db_connection
(...)
if __name__ == "__main__":
    AsyncIOMainLoop().install()
    dbc = db_connection(**db_args)

    # What we have here is a rather basic tornado app
    app = make_app()
    app.listen(port=8080)
    asyncio.get_event_loop().run_forever()
The problem we saw, and this is something I cannot explain, is that the connection arguments (db_args) are not always set and sometimes appear to be an empty dictionary; asyncpg then falls back and tries to derive a connection string from the Linux user associated with the container via os.
Mar 5 16:59:50 ABC: ERROR: tornado.application:Future <tornado.concurrent.Future object at 0x7eff6005c320> exception was never retrieved:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1063, in run
yielded = self.gen.throw(*exc_info)
File "/abc/def/db_connection.py", line 26, in init
self.db_pool = yield asyncpg.create_pool(**db_args)
File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1055, in run
value = future.result()
File "/usr/local/lib/python3.6/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
Mar 5 16:59:50 ABC: File "<string>", line 4, in raise_exc_info
File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 307, in wrapper
yielded = next(result)
File "<string>", line 6, in _wrap_awaitable
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 356, in _async__init__
await first_ch.connect()
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 126, in connect
**self._connect_kwargs)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 1498, in connect
max_cacheable_statement_size=max_cacheable_statement_size)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connect_utils.py", line 296, in _connect
addrs, params, config=_parse_connect_arguments(timeout=timeout, **kwargs)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connect_utils.py", line 242, in _parse_connect_arguments
server_settings=server_settings)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connect_utils.py", line 152, in _parse_connect_dsn_and_args
user=getpass.getuser()
File "/usr/local/lib/python3.6/getpass.py", line 169, in getuser
return pwd.getpwuid(os.getuid())[0]
KeyError: 'getpwuid(): uid not found: 10001'
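Note that the fallback in the traceback, getpass.getuser(), is what asyncpg does when no user argument reaches it, which is consistent with init() being re-invoked with empty kwargs: run() calls self.init() with no arguments, so its **db_args is {}. Below is a minimal sketch (make_app, db_args and AsyncIOMainLoop are the names already used in the post; everything else is an assumption, not the original code) that creates the pool exactly once at startup and fails loudly if the arguments are empty, instead of letting asyncpg guess the OS user:

import asyncio
import asyncpg

# Sketch only: build the pool once, before the app serves traffic.
async def make_db_pool(**db_args):
    if not db_args:
        # refuse to let asyncpg fall back to getpass.getuser()
        raise RuntimeError("db_args is empty")
    return await asyncpg.create_pool(**db_args)

if __name__ == "__main__":
    AsyncIOMainLoop().install()
    loop = asyncio.get_event_loop()
    # actually wait for pool creation; a bare call to a coroutine does nothing
    db_pool = loop.run_until_complete(make_db_pool(**db_args))
    app = make_app()
    app.listen(port=8080)
    loop.run_forever()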
Related
I encountered the following problem when I set a larger world_size:
File "/root/anaconda3/envs/final/lib/python3.9/site-packages/torch/distributed/rpc/__init__.py", line 195, in init_rpc
_init_rpc_backend(backend, store, name, rank, world_size, rpc_backend_options)
File "/root/anaconda3/envs/final/lib/python3.9/site-packages/torch/distributed/rpc/__init__.py", line 229, in _init_rpc_backend
rpc_agent = backend_registry.init_backend(
File "/root/anaconda3/envs/final/lib/python3.9/site-packages/torch/distributed/rpc/backend_registry.py", line 106, in init_backend
return backend.value.init_backend_handler(*args, **kwargs)
File "/root/anaconda3/envs/final/lib/python3.9/site-packages/torch/distributed/rpc/backend_registry.py", line 315, in _tensorpipe_init_backend_handler
api._all_gather(None, timeout=rpc_constants.DEFAULT_RPC_TIMEOUT_SEC)
File "/root/anaconda3/envs/final/lib/python3.9/site-packages/torch/distributed/rpc/api.py", line 77, in wrapper
return func(*args, **kwargs)
File "/root/anaconda3/envs/final/lib/python3.9/site-packages/torch/distributed/rpc/api.py", line 204, in _all_gather
rpc_sync(
File "/root/anaconda3/envs/final/lib/python3.9/site-packages/torch/distributed/rpc/api.py", line 77, in wrapper
return func(*args, **kwargs)
File "/root/anaconda3/envs/final/lib/python3.9/site-packages/torch/distributed/rpc/api.py", line 765, in rpc_sync
return fut.wait()
RuntimeError: connect: Resource temporarily unavailable (this error originated at tensorpipe/common/socket.cc:114)
And this is my code:
import os
import time
from entity.Server import Server
import torch.multiprocessing as mp
from torch.distributed import rpc
from utils.options import args_parser
import torch.distributed as dist

SERVER_NAME = "Server"
CLIENT_NAME = "Client{}"

os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '29500'


def run(rank, args):
    if rank == 0:
        rpc.init_rpc(SERVER_NAME, rank=rank, world_size=args.world_size)
        # server = Server(args)
    else:
        rpc.init_rpc(CLIENT_NAME.format(rank), rank=rank, world_size=args.world_size)
    rpc.shutdown()


if __name__ == "__main__":
    args = args_parser()
    mp.spawn(
        run,
        args=(args, ),
        nprocs=args.world_size,
        join=True
    )
When I set world_size above 25, the code fails. I think this may be a problem with the Linux system configuration. Does anyone know how to configure or fix this?
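connect: Resource temporarily unavailable is EAGAIN surfacing from TensorPipe's socket layer, so this usually points at per-process OS limits rather than the Python code: with world_size processes each holding sockets to every peer, the default file-descriptor limit is easy to exhaust past a few dozen ranks. A hedged sketch of two knobs worth trying inside run() before init_rpc (resource.setrlimit and TensorPipeRpcBackendOptions are standard APIs, but the exact values here are guesses, not known-good settings):

import resource
from torch.distributed import rpc

def run(rank, args):
    # Raise the soft open-file limit to the hard limit in every process;
    # 25+ ranks of all-to-all sockets can exceed a 1024-fd default.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))

    # Give TensorPipe more headroom during the init_rpc handshake.
    opts = rpc.TensorPipeRpcBackendOptions(
        num_worker_threads=64,  # guessed value; default is 16
        rpc_timeout=120,        # seconds; guessed value, default is 60
    )
    name = "Server" if rank == 0 else "Client{}".format(rank)
    rpc.init_rpc(name, rank=rank, world_size=args.world_size,
                 rpc_backend_options=opts)
    rpc.shutdown()

If the failure persists, checking the host-level limit (ulimit -n) for the spawning shell is the other place to look.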
I have a simple Flask app set up which runs with the command flask run. I'd like to run this app in a Docker container using a gunicorn server. However, when I try to run it with gunicorn I see error messages. I'm running gunicorn --worker-class eventlet -w 1 app:app, which I got from their documentation.
I have a simple flask-socketio app:
.
├── app.py
└── templates
└── index.html
Here's the contents of app.py:
import os
import sys

from eventlet import patcher, support
import six
select = patcher.original('select')
time = patcher.original('time')

from eventlet.hubs.hub import BaseHub, READ, WRITE, noop

if getattr(select, 'kqueue', None) is None:
    raise ImportError('No kqueue implementation found in select module')

FILTERS = {READ: select.KQ_FILTER_READ,
           WRITE: select.KQ_FILTER_WRITE}


class Hub(BaseHub):
    MAX_EVENTS = 100

    def __init__(self, clock=None):
        super(Hub, self).__init__(clock)
        self._events = {}
        self._init_kqueue()

    def _init_kqueue(self):
        self.kqueue = select.kqueue()
        self._pid = os.getpid()

    def _reinit_kqueue(self):
        self.kqueue.close()
        self._init_kqueue()
        kqueue = self.kqueue
        events = [e for i in six.itervalues(self._events)
                  for e in six.itervalues(i)]
        kqueue.control(events, 0, 0)

    def _control(self, events, max_events, timeout):
        try:
            return self.kqueue.control(events, max_events, timeout)
        except (OSError, IOError):
            # have we forked?
            if os.getpid() != self._pid:
                self._reinit_kqueue()
                return self.kqueue.control(events, max_events, timeout)
            raise

    def add(self, evtype, fileno, cb, tb, mac):
        listener = super(Hub, self).add(evtype, fileno, cb, tb, mac)
        events = self._events.setdefault(fileno, {})
        if evtype not in events:
            try:
                event = select.kevent(fileno, FILTERS.get(evtype), select.KQ_EV_ADD)
                self._control([event], 0, 0)
                events[evtype] = event
            except ValueError:
                super(Hub, self).remove(listener)
                raise
        return listener

    def _delete_events(self, events):
        del_events = [
            select.kevent(e.ident, e.filter, select.KQ_EV_DELETE)
            for e in events
        ]
        self._control(del_events, 0, 0)

    def remove(self, listener):
        super(Hub, self).remove(listener)
        evtype = listener.evtype
        fileno = listener.fileno
        if not self.listeners[evtype].get(fileno):
            event = self._events[fileno].pop(evtype, None)
            if event is None:
                return
            try:
                self._delete_events((event,))
            except OSError:
                pass

    def remove_descriptor(self, fileno):
        super(Hub, self).remove_descriptor(fileno)
        try:
            events = self._events.pop(fileno).values()
            self._delete_events(events)
        except KeyError:
            pass
        except OSError:
            pass

    def wait(self, seconds=None):
        readers = self.listeners[READ]
        writers = self.listeners[WRITE]

        if not readers and not writers:
            if seconds:
                time.sleep(seconds)
            return
        result = self._control([], self.MAX_EVENTS, seconds)
        SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
        for event in result:
            fileno = event.ident
            evfilt = event.filter
            try:
                if evfilt == FILTERS[READ]:
                    readers.get(fileno, noop).cb(fileno)
                if evfilt == FILTERS[WRITE]:
                    writers.get(fileno, noop).cb(fileno)
            except SYSTEM_EXCEPTIONS:
                raise
            except:
                self.squelch_exception(fileno, sys.exc_info())
                support.clear_sys_exc_info()
Here are the errors I'm seeing when running the gunicorn command:
Exception ignored in: <function _after_fork at 0x1121fc1f0>
Traceback (most recent call last):
File "/usr/local/Cellar/python#3.9/3.9.6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py", line 1510, in _after_fork
thread._reset_internal_locks(True)
File "/usr/local/Cellar/python#3.9/3.9.6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py", line 845, in _reset_internal_locks
self._started._at_fork_reinit()
File "/usr/local/Cellar/python#3.9/3.9.6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py", line 527, in _at_fork_reinit
self._cond._at_fork_reinit()
File "/usr/local/Cellar/python#3.9/3.9.6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py", line 253, in _at_fork_reinit
self._lock._at_fork_reinit()
AttributeError: 'Semaphore' object has no attribute '_at_fork_reinit'
[2022-01-31 20:57:29 +0000] [51963] [INFO] Booting worker with pid: 51963
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/hub.py", line 460, in fire_timers
timer()
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/timer.py", line 59, in __call__
cb(*args, **kw)
File "/usr/local/lib/python3.9/site-packages/eventlet/greenthread.py", line 219, in main
result = function(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/gunicorn/workers/geventlet.py", line 78, in _eventlet_serve
conn, addr = sock.accept()
File "/usr/local/lib/python3.9/site-packages/eventlet/greenio/base.py", line 228, in accept
self._trampoline(fd, read=True, timeout=self.gettimeout(), timeout_exc=_timeout_exc)
File "/usr/local/lib/python3.9/site-packages/eventlet/greenio/base.py", line 206, in _trampoline
return trampoline(fd, read=read, write=write, timeout=timeout,
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/__init__.py", line 160, in trampoline
listener = hub.add(hub.READ, fileno, current.switch, current.throw, mark_as_closed)
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/kqueue.py", line 55, in add
self._control([event], 0, 0)
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/kqueue.py", line 41, in _control
return self.kqueue.control(events, max_events, timeout)
OSError: [Errno 9] Bad file descriptor
[2022-01-31 20:57:29 +0000] [51963] [ERROR] Exception in worker process
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/gunicorn/arbiter.py", line 589, in spawn_worker
worker.init_process()
File "/usr/local/lib/python3.9/site-packages/gunicorn/workers/geventlet.py", line 134, in init_process
super().init_process()
File "/usr/local/lib/python3.9/site-packages/gunicorn/workers/base.py", line 142, in init_process
self.run()
File "/usr/local/lib/python3.9/site-packages/gunicorn/workers/geventlet.py", line 166, in run
eventlet.sleep(1.0)
File "/usr/local/lib/python3.9/site-packages/eventlet/greenthread.py", line 36, in sleep
hub.switch()
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/hub.py", line 297, in switch
return self.greenlet.switch()
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/hub.py", line 349, in run
self.wait(sleep_time)
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/kqueue.py", line 100, in wait
result = self._control([], self.MAX_EVENTS, seconds)
File "/usr/local/lib/python3.9/site-packages/eventlet/hubs/kqueue.py", line 41, in _control
return self.kqueue.control(events, max_events, timeout)
I'm not sure if I'm using the wrong command or if there is some other issue preventing me from using gunicorn.
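The OSError: [Errno 9] Bad file descriptor comes from the kqueue hub after gunicorn forks the worker: a kqueue file descriptor is not inherited across fork(), and the hub's own fork check (_reinit_kqueue above) evidently does not fire here. One hedged workaround, relying on eventlet's standard hub-selection mechanism but with the choice of hub being an assumption to verify, is to force a non-kqueue hub before eventlet is used anywhere else (e.g. at the very top of app.py):

# Sketch of a workaround, not a definitive fix: select a different hub so
# the forked worker does not inherit a dead kqueue descriptor.
import eventlet.hubs
eventlet.hubs.use_hub('poll')  # 'selects' is another portable choice

import eventlet
eventlet.monkey_patch()

The same selection can be made without code changes by exporting the EVENTLET_HUB environment variable in the environment gunicorn runs under.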
Code:
from aiohttp import web
from aiortc.mediastreams import MediaStreamTrack
from aiortc import RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.media import MediaPlayer
import asyncio
import json
import os
from multiprocessing import Process, freeze_support
from queue import Queue
import sys
import threading
from time import sleep
import fractions
import time


class RadioServer(Process):
    def __init__(self, q):
        super().__init__()
        self.q = q
        self.ROOT = os.path.dirname(__file__)
        self.pcs = []
        self.channels = []
        self.stream_offers = []
        self.requests = []

    def run(self):
        self.app = web.Application()
        self.app.on_shutdown.append(self.on_shutdown)
        self.app.router.add_get("/", self.index)
        self.app.router.add_get("/radio.js", self.javascript)
        self.app.router.add_get("/jquery-3.5.1.min.js", self.jquery)
        self.app.router.add_post("/offer", self.offer)
        threading.Thread(target=self.fill_the_queues).start()
        web.run_app(self.app, access_log=None, host="192.168.1.20", port="8080", ssl_context=None)

    def fill_the_queues(self):
        while True:
            frame = self.q.get()
            for stream_offer in self.stream_offers:
                stream_offer.q.put(frame)

    async def index(self, request):
        content = open(os.path.join(self.ROOT, "index.html"), encoding="utf8").read()
        return web.Response(content_type="text/html", text=content)

    async def javascript(self, request):
        content = open(os.path.join(self.ROOT, "radio.js"), encoding="utf8").read()
        return web.Response(content_type="application/javascript", text=content)

    async def jquery(self, request):
        content = open(os.path.join(self.ROOT, "jquery-3.5.1.min.js"), encoding="utf8").read()
        return web.Response(content_type="application/javascript", text=content)

    async def offer(self, request):
        params = await request.json()
        offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
        pc = RTCPeerConnection()
        self.pcs.append(pc)
        self.requests.append(request)

        # prepare epalxeis media
        self.stream_offers.append(CustomRadioStream())
        pc.addTrack(self.stream_offers[-1])

        @pc.on("iceconnectionstatechange")
        async def on_iceconnectionstatechange():
            if pc.iceConnectionState == "failed":
                self.pcs.remove(pc)
                self.requests.remove(request)
                print(str(request.remote) + " disconnected from radio server")
                print("Current peer connections:" + str(len(self.pcs)))

        # handle offer
        await pc.setRemoteDescription(offer)

        # send answer
        answer = await pc.createAnswer()
        await pc.setLocalDescription(answer)

        return web.Response(content_type="application/json", text=json.dumps({"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}))

    async def on_shutdown(self, app):
        # close peer connections
        if self.pcs:
            coros = [pc.close() for pc in self.pcs]
            await asyncio.gather(*coros)
        self.pcs = []
        self.channels = []
        self.stream_offers = []


"""
some other classes here such as CustomRadioStream and RadioOutputStream
"""

if __name__ == "__main__":
    freeze_support()
    q = Queue()
    custom_server_child_process = RadioServer(q)
    custom_server_child_process.start()
Error
Traceback (most recent call last):
File "123.py", line 106, in <module>
custom_server_child_process.start()
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 327, in _Popen
return Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle '_thread.lock' object
What am I doing wrong? If I call the run function directly (instead of start), there is no problem, but I want to use a separate process for this class.
Edit: OK, with multiprocessing.Queue it works fine, but now similar code produces this error:
$ python "Papinhio_player.py"
Traceback (most recent call last):
File "Papinhio_player.py", line 3078, in <module>
program = PapinhioPlayerCode()
File "Papinhio_player.py", line 250, in __init__
self.manage_decks_instance = Manage_Decks(self)
File "C:\python\scripts\Papinhio player\src\main\python_files/manage_decks.py"
, line 356, in __init__
self.custom_server_child_process.start()
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/process.py", line 121, i
n start
self._popen = self._Popen(self)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 224, i
n _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 327, i
n _Popen
return Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/popen_spawn_win32.py", l
ine 93, in __init__
reduction.dump(process_obj, to_child)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/reduction.py", line 60,
in dump
ForkingPickler(file, protocol).dump(obj)
File "stringsource", line 2, in av.audio.codeccontext.AudioCodecContext.__redu
ce_cython__
TypeError: self.parser,self.ptr cannot be converted to a Python object for pickl
ing
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/spawn.py", line 116, in
spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/spawn.py", line 126, in
_main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
Some objects cannot be serialized and then unserialized. The stack trace you posted mentions:
TypeError: cannot pickle '_thread.lock' object
A lock holds state in memory and guarantees that no other process can own the same lock at the same moment, so it is typically a very bad candidate for this operation: what should be created when you deserialize it?
To fix this, choose a way to select the relevant fields of the object you want to serialize, and pickle/unpickle only that part.
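A minimal sketch of that fix for the RadioServer above, assuming spawn-based multiprocessing as in the traceback (the field name unpicklable_resource is illustrative, not from the original code):

from multiprocessing import Process

class RadioServer(Process):
    def __getstate__(self):
        # Called when the object is pickled for the child process:
        # keep only the picklable fields.
        state = self.__dict__.copy()
        state.pop("unpicklable_resource", None)  # hypothetical lock/codec holder
        return state

    def __setstate__(self, state):
        # Called in the child: restore the picklable fields and
        # recreate the dropped resource from scratch.
        self.__dict__.update(state)
        self.unpicklable_resource = None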
I am using Python 3.5.1. These are the errors:
Traceback (most recent call last):
File "C:\Users\KORKUSUZ\Desktop\twitter-realtime-heatmap-master\tstream.py", line 55, in <module>
streamer.filter(track = setTerms)
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\tweepy-3.6.0-py3.5.egg\tweepy\streaming.py", line 447, in filter
self._start(async)
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\tweepy-3.6.0-py3.5.egg\tweepy\streaming.py", line 361, in _start
self._run()
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\tweepy-3.6.0-py3.5.egg\tweepy\streaming.py", line 294, in _run
raise exception
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\tweepy-3.6.0-py3.5.egg\tweepy\streaming.py", line 263, in _run
self._read_loop(resp)
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\tweepy-3.6.0-py3.5.egg\tweepy\streaming.py", line 324, in _read_loop
self._data(next_status_obj)
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\tweepy-3.6.0-py3.5.egg\tweepy\streaming.py", line 297, in _data
if self.listener.on_data(data) is False:
File "C:\Users\KORKUSUZ\Desktop\twitter-realtime-heatmap-master\tstream.py", line 48, in on_data
col.insert(json.loads(data))
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\pymongo-3.2.2-py3.5-win-amd64.egg\pymongo\collection.py", line 2203, in insert
with self._socket_for_writes() as sock_info:
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\contextlib.py", line 59, in __enter__
return next(self.gen)
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\pymongo-3.2.2-py3.5-win-amd64.egg\pymongo\mongo_client.py", line 716, in _get_socket
server = self._get_topology().select_server(selector)
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\pymongo-3.2.2-py3.5-win-amd64.egg\pymongo\topology.py", line 142, in select_server
address))
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\pymongo-3.2.2-py3.5-win-amd64.egg\pymongo\topology.py", line 118, in select_servers
self._error_message(selector))
pymongo.errors.ServerSelectionTimeoutError: localhost:27017: [WinError 10061] No connection could be made because the target machine actively refused it
## This is my Code ##
import tweepy
import json
from tweepy.streaming import StreamListener
from tweepy import Stream
from bson import json_util
from tweepy.utils import import_simplejson

try:
    from pymongo.connection import Connection
except ImportError as e:
    from pymongo import MongoClient as Connection

json = import_simplejson()
mongocon = Connection()
db = mongocon.tstream
col = db.tweets_tail

consumer_key = "FfRhUzvXKlnS9sDWfGZqxECzQ"
consumer_secret = "uhuiApn3IyzXWw34kvl8ia1DzgAaPyk2xuxXG3HtZgEIbFrWSx"
access_token = "185166166-o4xUFWdjoL84K1MscTot4SfH9DZnkG5maYbhZZ6Z"
access_token_secret = "2vrydY3ogq5vU8Mkqp3CAqeMmlDuRStU6iSDgNbPxDkdS"

auth1 = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth1.set_access_token(access_token, access_token_secret)


class StreamListener(tweepy.StreamListener):
    mongocon = Connection()
    db = mongocon.tstream
    col = db.tweets
    json = import_simplejson()

    def on_status(self, tweet):
        print('Ran on_status')

    def on_error(self, status_code):
        return False

    def on_data(self, data):
        if data[0].isdigit():
            pass
        else:
            col.insert(json.loads(data))
            print(json.loads(data))


l = StreamListener()
streamer = tweepy.Stream(auth=auth1, listener=l)
setTerms = ["bigdata", "devops", "hadoop", "twitter"]
streamer.filter(track=setTerms)
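The final error is pymongo timing out because nothing is listening on localhost:27017, i.e. the MongoDB server is not running (or not reachable), rather than anything in the tweepy code. A small preflight check, sketched here with pymongo's standard server_info() call (the timeout value is an arbitrary choice), fails fast with a clear message instead of dying mid-stream:

from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError

# Sketch: verify MongoDB is reachable before starting the stream.
client = MongoClient("localhost", 27017, serverSelectionTimeoutMS=2000)
try:
    client.server_info()  # forces a round trip to the server
except ServerSelectionTimeoutError:
    raise SystemExit("MongoDB is not running on localhost:27017 - start mongod first")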
I'm trying to use alchimia to get an asynchronous API for the DB. I'm trying to make a simple request to the DB, like this:
def authorization(self, data):
    """
    Checking user with DB
    """
    def __gotResult(user):
        yield engine.execute(sqlalchemy.select([Users]).where(Users.name == user))

    result = __gotResult(data['user'])
    log.msg("[AUTH] User=%s trying to auth..." % data['user'])
    data, result_msg = commands.AUTH(result, data)
    log.msg(result_msg)
    return data
And I can't understand what I am doing wrong. Maybe the issue is in the options for the engine (where reactor=[])?
Source code:
import sys
from json import dumps, loads

import sqlalchemy
from twisted.internet import reactor, ssl
from twisted.python import log, logfile
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import WebSocketServerFactory, WebSocketServerProtocol, listenWS

import commands
from db.tables import Users
from alchimia import TWISTED_STRATEGY

log_file = logfile.LogFile("service.log", ".")
log.startLogging(log_file)

engine = sqlalchemy.create_engine('postgresql://test:test@localhost/testdb', pool_size=20, max_overflow=0, strategy=TWISTED_STRATEGY, reactor=[])


class DFSServerProtocol(WebSocketServerProtocol):
    commands = commands.commands_user

    def __init__(self):
        self.commands_handlers = self.__initHandlersUser()

    def __initHandlersUser(self):
        handlers = commands.commands_handlers_server
        handlers['AUTH'] = self.authorization
        handlers['READ'] = None
        handlers['WRTE'] = None
        handlers['DELT'] = None
        handlers['RNME'] = None
        handlers['SYNC'] = None
        handlers['LIST'] = None
        return handlers

    def authorization(self, data):
        """
        Checking user with DB
        """
        def __gotResult(user):
            yield engine.execute(sqlalchemy.select([Users]).where(Users.name == data['user']))

        result = __gotResult(data['user'])
        log.msg("[AUTH] User=%s trying to auth..." % data['user'])
        data, result_msg = commands.AUTH(result, data)
        log.msg(result_msg)
        return data

    def onMessage(self, payload, isBinary):
        json_data = loads(payload)
        json_auth = json_data['auth']
        json_cmd = json_data['cmd']
        if json_auth == False:
            if json_cmd == 'AUTH':
                json_data = self.commands_handlers['AUTH'](json_data)
        # for authorized users
        else:
            if json_cmd in commands.commands_user.keys():
                if self.commands_handlers[json_cmd] is not None:
                    json_data = self.commands_handlers[json_cmd](json_data)
                else:
                    json_data['error'] = '%s command is not already realized...' % json_cmd
            else:
                json_data['auth'] = False
                json_data['error'] = 'This command is not supported on server...'
        response = dumps(json_data)
        self.sendMessage(str(response))


if __name__ == '__main__':
    if len(sys.argv) > 1 and sys.argv[1] == 'debug':
        log.startLogging(sys.stdout)
        debug = True
    else:
        debug = False

    contextFactory = ssl.DefaultOpenSSLContextFactory('keys/server.key', 'keys/server.crt')
    factory = WebSocketServerFactory("wss://localhost:9000", debug=debug, debugCodePaths=debug)
    factory.protocol = DFSServerProtocol
    factory.setProtocolOptions(allowHixie76=True)
    listenWS(factory, contextFactory)

    webdir = File("./web/")
    webdir.contentTypes['.crt'] = 'application/x-x509-ca-cert'
    web = Site(webdir)
    reactor.listenSSL(8080, web, contextFactory)
    # reactor.listenTCP(8080, web)
    reactor.run()
Traceback:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/twisted/python/log.py", line 88, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/usr/local/lib/python2.7/dist-packages/twisted/python/log.py", line 73, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/usr/local/lib/python2.7/dist-packages/twisted/python/context.py", line 118, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/usr/local/lib/python2.7/dist-packages/twisted/python/context.py", line 81, in callWithContext
return func(*args,**kw)
--- <exception caught here> ---
File "/usr/local/lib/python2.7/dist-packages/twisted/internet/posixbase.py", line 614, in _doReadOrWrite
why = selectable.doRead()
File "/usr/local/lib/python2.7/dist-packages/twisted/internet/tcp.py", line 215, in doRead
return self._dataReceived(data)
File "/usr/local/lib/python2.7/dist-packages/twisted/internet/tcp.py", line 221, in _dataReceived
rval = self.protocol.dataReceived(data)
File "/usr/local/lib/python2.7/dist-packages/twisted/protocols/tls.py", line 419, in dataReceived
self._flushReceiveBIO()
File "/usr/local/lib/python2.7/dist-packages/twisted/protocols/tls.py", line 389, in _flushReceiveBIO
ProtocolWrapper.dataReceived(self, bytes)
File "/usr/local/lib/python2.7/dist-packages/twisted/protocols/policies.py", line 120, in dataReceived
self.wrappedProtocol.dataReceived(data)
File "/usr/local/lib/python2.7/dist-packages/autobahn/twisted/websocket.py", line 78, in dataReceived
self._dataReceived(data)
File "/usr/local/lib/python2.7/dist-packages/autobahn/websocket/protocol.py", line 1270, in _dataReceived
self.consumeData()
File "/usr/local/lib/python2.7/dist-packages/autobahn/websocket/protocol.py", line 1286, in consumeData
while self.processData() and self.state != WebSocketProtocol.STATE_CLOSED:
File "/usr/local/lib/python2.7/dist-packages/autobahn/websocket/protocol.py", line 1445, in processData
return self.processDataHybi()
File "/usr/local/lib/python2.7/dist-packages/autobahn/websocket/protocol.py", line 1758, in processDataHybi
fr = self.onFrameEnd()
File "/usr/local/lib/python2.7/dist-packages/autobahn/websocket/protocol.py", line 1887, in onFrameEnd
self._onMessageEnd()
File "/usr/local/lib/python2.7/dist-packages/autobahn/twisted/websocket.py", line 107, in _onMessageEnd
self.onMessageEnd()
File "/usr/local/lib/python2.7/dist-packages/autobahn/websocket/protocol.py", line 734, in onMessageEnd
self._onMessage(payload, self.message_is_binary)
File "/usr/local/lib/python2.7/dist-packages/autobahn/twisted/websocket.py", line 110, in _onMessage
self.onMessage(payload, isBinary)
File "server.py", line 84, in onMessage
json_data = self.commands_handlers['AUTH'](json_data)
File "server.py", line 68, in authorization
data, result_msg = commands.AUTH(result, data)
File "/home/relrin/code/Helenae/helenae/commands.py", line 68, in AUTH
if result['name'] == data['user']:
exceptions.TypeError: 'generator' object has no attribute '__getitem__'
I think you are missing an @inlineCallbacks around __gotResult(). That might not help you quite enough, though, since a single-statement generator wrapped with inlineCallbacks is sort of pointless. You should get used to working with explicit Deferred handling anyway. Let's pull this apart:
def authorization(self, data):
    """
    Checking user with DB
    """
    # engine.execute already gives us a deferred; we'll grab on to that.
    user = data['user']
    result_d = engine.execute(sqlalchemy.select([Users]).where(Users.name == user))

    # We don't have the result inside authorization itself; we need to wrap
    # any code that works with the result in a callback. Note the fresh name
    # auth_data: rebinding data here would shadow the closure variable.
    def result_cb(result):
        auth_data, result_msg = commands.AUTH(result, data)
        return auth_data

    result_d.addCallback(result_cb)

    # We want to pass the (asynchronous) result out; it's hiding in our
    # deferred, so we return *that* instead. Callers need to add more
    # callbacks to it.
    return result_d
If you insist, we can squish this down into an inlineCallbacks form:
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def authorization(self, data):
    user = data['user']
    result = yield engine.execute(sqlalchemy.select([Users]).where(Users.name == user))
    data, result_msg = commands.AUTH(result, data)
    returnValue(data)
As before, though, authorization() is asynchronous, and it must be, since engine.execute is asynchronous. To use it, you must attach a callback to the Deferred it returns (although the caller may also yield it if the caller is itself wrapped in inlineCallbacks).
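For completeness, a hedged sketch of what a caller that is not itself decorated with inlineCallbacks could look like, based on the onMessage handler from the question (send_response and on_error are illustrative names, not part of the original code):

def onMessage(self, payload, isBinary):
    json_data = loads(payload)

    # authorization() now returns a Deferred; attach callbacks
    # instead of expecting the result synchronously.
    d = self.authorization(json_data)

    def send_response(data):
        self.sendMessage(dumps(data))

    def on_error(failure):
        log.err(failure)
        self.sendMessage(dumps({'error': 'authorization failed'}))

    d.addCallbacks(send_response, on_error)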