Cannot connect Python to MongoDB with pymongo

I am using Python 3.5.1 and this is the error:
Traceback (most recent call last):
File "C:\Users\KORKUSUZ\Desktop\twitter-realtime-heatmap-master\tstream.py", line 55, in <module>
streamer.filter(track = setTerms)
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\tweepy-3.6.0-py3.5.egg\tweepy\streaming.py", line 447, in filter
self._start(async)
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\tweepy-3.6.0-py3.5.egg\tweepy\streaming.py", line 361, in _start
self._run()
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\tweepy-3.6.0-py3.5.egg\tweepy\streaming.py", line 294, in _run
raise exception
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\tweepy-3.6.0-py3.5.egg\tweepy\streaming.py", line 263, in _run
self._read_loop(resp)
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\tweepy-3.6.0-py3.5.egg\tweepy\streaming.py", line 324, in _read_loop
self._data(next_status_obj)
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\tweepy-3.6.0-py3.5.egg\tweepy\streaming.py", line 297, in _data
if self.listener.on_data(data) is False:
File "C:\Users\KORKUSUZ\Desktop\twitter-realtime-heatmap-master\tstream.py", line 48, in on_data
col.insert(json.loads(data))
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\pymongo-3.2.2-py3.5-win-amd64.egg\pymongo\collection.py", line 2203, in insert
with self._socket_for_writes() as sock_info:
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\contextlib.py", line 59, in __enter__
return next(self.gen)
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\pymongo-3.2.2-py3.5-win-amd64.egg\pymongo\mongo_client.py", line 716, in _get_socket
server = self._get_topology().select_server(selector)
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\pymongo-3.2.2-py3.5-win-amd64.egg\pymongo\topology.py", line 142, in select_server
address))
File "C:\Users\KORKUSUZ\AppData\Local\Programs\Python\Python35\lib\site-packages\pymongo-3.2.2-py3.5-win-amd64.egg\pymongo\topology.py", line 118, in select_servers
self._error_message(selector))
pymongo.errors.ServerSelectionTimeoutError: localhost:27017: [WinError 10061] No connection could be made because the target machine actively refused it

## This is my Code ##
import tweepy
import json
from tweepy.streaming import StreamListener
from tweepy import Stream
from bson import json_util
from tweepy.utils import import_simplejson

try:
    from pymongo.connection import Connection
except ImportError as e:
    from pymongo import MongoClient as Connection

json = import_simplejson()

mongocon = Connection()
db = mongocon.tstream
col = db.tweets_tail

consumer_key = "FfRhUzvXKlnS9sDWfGZqxECzQ"
consumer_secret = "uhuiApn3IyzXWw34kvl8ia1DzgAaPyk2xuxXG3HtZgEIbFrWSx"
access_token = "185166166-o4xUFWdjoL84K1MscTot4SfH9DZnkG5maYbhZZ6Z"
access_token_secret = "2vrydY3ogq5vU8Mkqp3CAqeMmlDuRStU6iSDgNbPxDkdS"

auth1 = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth1.set_access_token(access_token, access_token_secret)

class StreamListener(tweepy.StreamListener):
    mongocon = Connection()
    db = mongocon.tstream
    col = db.tweets
    json = import_simplejson()

    def on_status(self, tweet):
        print('Ran on_status')

    def on_error(self, status_code):
        return False

    def on_data(self, data):
        if data[0].isdigit():
            pass
        else:
            col.insert(json.loads(data))
            print(json.loads(data))

l = StreamListener()
streamer = tweepy.Stream(auth=auth1, listener=l)
setTerms = ["bigdata", "devops", "hadoop", "twitter"]
streamer.filter(track=setTerms)
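The ServerSelectionTimeoutError with WinError 10061 usually means nothing was listening on localhost:27017 when the insert ran, i.e. the MongoDB server (mongod) was not started or is bound to a different host/port. Below is a minimal sketch to verify the connection before starting the stream, assuming the default localhost:27017 from the traceback; note also that PyMongo 3.x prefers insert_one over the deprecated Collection.insert.
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError

# Host/port taken from the traceback; adjust if mongod runs elsewhere.
client = MongoClient("mongodb://localhost:27017/", serverSelectionTimeoutMS=3000)
try:
    # 'ping' forces server selection, so it fails fast if mongod is unreachable.
    client.admin.command("ping")
    print("MongoDB is reachable")
except ServerSelectionTimeoutError as exc:
    print("MongoDB is not reachable:", exc)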

Related

Tornado is throwing stream closed error in chalice python

I am trying out AWS Neptune for the first time using Chalice.
This is the entire error:
Traceback (most recent call last):
File "/var/task/chalice/app.py", line 1104, in _get_view_function_response
response = view_function(**function_args)
File "/var/task/app.py", line 44, in getPosts
raise e
File "/var/task/app.py", line 37, in getPosts
result = g.V().has('name', 'test1').toList()
File "/var/task/gremlin_python/process/traversal.py", line 58, in toList
return list(iter(self))
File "/var/task/gremlin_python/process/traversal.py", line 48, in __next__
self.traversal_strategies.apply_strategies(self)
File "/var/task/gremlin_python/process/traversal.py", line 573, in apply_strategies
traversal_strategy.apply(traversal)
File "/var/task/gremlin_python/driver/remote_connection.py", line 149, in apply
remote_traversal = self.remote_connection.submit(traversal.bytecode)
File "/var/task/gremlin_python/driver/driver_remote_connection.py", line 55, in submit
result_set = self._client.submit(bytecode)
File "/var/task/gremlin_python/driver/client.py", line 111, in submit
return self.submitAsync(message, bindings=bindings).result()
File "/var/task/gremlin_python/driver/client.py", line 127, in submitAsync
return conn.write(message)
File "/var/task/gremlin_python/driver/connection.py", line 55, in write
self.connect()
File "/var/task/gremlin_python/driver/connection.py", line 45, in connect
self._transport.connect(self._url, self._headers)
File "/var/task/gremlin_python/driver/tornado/transport.py", line 36, in connect
lambda: websocket.websocket_connect(url))
File "/var/task/tornado/ioloop.py", line 576, in run_sync
return future_cell[0].result()
tornado.simple_httpclient.HTTPStreamClosedError: Stream closed
And here is my code:
import logging
from chalice import Chalice, BadRequestError, NotFoundError
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.process.traversal import T, P, Operator
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from datetime import datetime

app = Chalice(app_name='chalice-neptune')
app.debug = True

logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)

def setup_graph():
    try:
        graph = Graph()
        connstring = 'ws://NEPTUNE-ENDPOINT-HERE:8182/gremlin'
        g = graph.traversal().withRemote(DriverRemoteConnection(connstring, 'g'))
        logging.info('Connected to Neptune')
    except Exception as e:
        logging.error(e, exc_info=True)
        raise BadRequestError("Could not connect to Neptune")
    return g

@app.route('/getPosts')
def getPosts():
    g = setup_graph()
    try:
        result = g.V().has('name', 'test1').toList()
        response = {
            'status_code': 200,
            'data': result
        }
    except Exception as e:
        raise e
    return response
Has anyone tried this?
I have followed the example found in the gremlin-python-example bucket.
I do not think I have missed anything from the example, but it is still throwing the stream closed error.
Apparently the only thing I needed to change was my connection string, and it is now working fine:
connstring = 'wss://NEPTUNE-ENDPOINT-HERE:8182/gremlin'
I changed it from ws to wss.
As to the difference between the two, you can refer to this answer: Difference between ws and wss?

Issues with Odoo v13 UID/API

Having an issue with the API for Odoo v13. I am able to get the server info, but for some reason the uid is not being returned.
import xmlrpc.client
url ="localhost:8069"
db = "pnv3"
username = "test"
password = "test"
common = xmlrpc.client.ServerProxy('{}/xmlrpc/2/common'.format(url))
print(common.version())
uid = common.authenticate(db, username, password, url)
print(uid)
Getting this error:
Traceback (most recent call last):
File "C:/Users/Web Content/.PyCharmCE2019.3/config/scratches/scratch.py", line 11, in <module>
uid = common.authenticate(db, username, password, url)
File "C:\Users\Web Content\AppData\Local\Programs\Python\Python37\lib\xmlrpc\client.py", line 1112, in __call__
return self.__send(self.__name, args)
File "C:\Users\Web Content\AppData\Local\Programs\Python\Python37\lib\xmlrpc\client.py", line 1452, in __request
verbose=self.__verbose
File "C:\Users\Web Content\AppData\Local\Programs\Python\Python37\lib\xmlrpc\client.py", line 1154, in request
return self.single_request(host, handler, request_body, verbose)
File "C:\Users\Web Content\AppData\Local\Programs\Python\Python37\lib\xmlrpc\client.py", line 1170, in single_request
return self.parse_response(resp)
File "C:\Users\Web Content\AppData\Local\Programs\Python\Python37\lib\xmlrpc\client.py", line 1342, in parse_response
return u.close()
File "C:\Users\Web Content\AppData\Local\Programs\Python\Python37\lib\xmlrpc\client.py", line 656, in close
raise Fault(**self._stack[0])
xmlrpc.client.Fault: <Fault 1: 'Traceback (most recent call last):\n File "/odoo/odoo-server/odoo/modules/registry.py", line 59, in __new__\n return cls.registries[db_name]\n File "/odoo/odoo-server/odoo/tools/func.py", line 69, in wrapper\n return func(self, *args, **kwargs)\n File "/odoo/odoo-server/odoo/tools/lru.py", line 44, in __getitem__\n a = self.d[obj].me\nKeyError: \'pnv3\'\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File "/odoo/odoo-server/odoo/addons/base/controllers/rpc.py", line 63, in xmlrpc_2\n response = self._xmlrpc(service)\n File "/odoo/odoo-server/odoo/addons/base/controllers/rpc.py", line 43, in _xmlrpc\n result = dispatch_rpc(service, method, params)\n File "/odoo/odoo-server/odoo/http.py", line 138, in dispatch_rpc\n result = dispatch(method, params)\n File "/odoo/odoo-server/odoo/service/common.py", line 61, in dispatch\n return g[exp_method_name](*params)\n File "/odoo/odoo-server/odoo/service/common.py", line 30, in exp_authenticate\n res_users = odoo.registry(db)[\'res.users\']\n File "/odoo/odoo-server/odoo/__init__.py", line 104, in registry\n return modules.registry.Registry(database_name)\n File "/odoo/odoo-server/odoo/modules/registry.py", line 61, in __new__\n return cls.new(db_name)\n File "/odoo/odoo-server/odoo/modules/registry.py", line 73, in new\n registry.init(db_name)\n File "/odoo/odoo-server/odoo/modules/registry.py", line 141, in init\n with closing(self.cursor()) as cr:\n File "/odoo/odoo-server/odoo/modules/registry.py", line 492, in cursor\n return self._db.cursor()\n File "/odoo/odoo-server/odoo/sql_db.py", line 649, in cursor\n return Cursor(self.__pool, self.dbname, self.dsn, serialized=serialized)\n File "/odoo/odoo-server/odoo/sql_db.py", line 186, in __init__\n self._cnx = pool.borrow(dsn)\n File "/odoo/odoo-server/odoo/sql_db.py", line 532, in _locked\n return fun(self, *args, **kwargs)\n File "/odoo/odoo-server/odoo/sql_db.py", line 600, in borrow\n **connection_info)\n File "/usr/local/lib/python3.7/dist-packages/psycopg2/__init__.py", line 130, in connect\n conn = _connect(dsn, connection_factory=connection_factory, **kwasync)\npsycopg2.OperationalError: FATAL: database "pnv3" does not exist\n\n'>
Process finished with exit code 1
The database does exist, I have triple-checked my password, and I am not sure what else to do at this point.
The URL to the Odoo server should include the protocol part, "http://", at the beginning. It is strange that you get the version info at all; what do you get as output for the version?
Also, you pass the url as the last parameter to the authenticate method, and this is not required. That alone should still not give the error you received.
Try your code with these two fixes and report whether it helps:
import xmlrpc.client
url ="http://localhost:8069"
db = "pnv3"
username = "test"
password = "test"
common = xmlrpc.client.ServerProxy('{}/xmlrpc/2/common'.format(url))
print(common.version())
uid = common.authenticate(db, username, password, {})
print(uid)
Identical code works on my machine...
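If authenticate succeeds and returns a numeric uid, that uid can then be used against the /xmlrpc/2/object endpoint to confirm the login actually works. A minimal sketch following the standard Odoo external API; the model and method used here (res.partner, search_count) are purely illustrative:
import xmlrpc.client

url = "http://localhost:8069"
db = "pnv3"
username = "test"
password = "test"

common = xmlrpc.client.ServerProxy('{}/xmlrpc/2/common'.format(url))
uid = common.authenticate(db, username, password, {})

# Use the uid for model calls through the object endpoint.
models = xmlrpc.client.ServerProxy('{}/xmlrpc/2/object'.format(url))
print(uid, models.execute_kw(db, uid, password, 'res.partner', 'search_count', [[]]))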

Bulk index to elasticsearch using python

I have the following code, which indexes data to Elasticsearch using Python:
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import requests
from requests.auth import AuthBase

requests.packages.urllib3.disable_warnings()

class TokenAuth(AuthBase):
    def __init__(self, token):
        self.token = token

    def __call__(self, r):
        r.headers['Authorization :Bearer'] = f'{self.token}'
        return r

es = Elasticsearch('https://localhost:9200/user/type', ca_certs=False, verify_certs=False, auth=TokenAuth(''))
#requests.get('https://httpbin.org/get', auth=TokenAuth('12345abcde-token'))
res = helpers.bulk(es, "ldif2.json", chunk_size=1, request_timeout=200)
It follows token-based authentication, but when I run this program I get the error message below. How do I solve this?
Traceback (most recent call last):
File "bulk_index.py", line 20, in <module>
res = helpers.bulk(es, "ldif2.json", chunk_size=1, request_timeout=200)
File "C:\Users\mkumaru\AppData\Local\Programs\Python\Python37\lib\site-packages\elasticsearch\helpers\actions.py", line 300, in bulk
for ok, item in streaming_bulk(client, actions, *args, **kwargs):
File "C:\Users\mkumaru\AppData\Local\Programs\Python\Python37\lib\site-packages\elasticsearch\helpers\actions.py", line 230, in streaming_bulk
**kwargs
File "C:\Users\mkumaru\AppData\Local\Programs\Python\Python37\lib\site-packages\elasticsearch\helpers\actions.py", line 116, in _process_bulk_chunk
raise e
File "C:\Users\mkumaru\AppData\Local\Programs\Python\Python37\lib\site-packages\elasticsearch\helpers\actions.py", line 112, in _process_bulk_chunk
resp = client.bulk("\n".join(bulk_actions) + "\n", *args, **kwargs)
File "C:\Users\mkumaru\AppData\Local\Programs\Python\Python37\lib\site-packages\elasticsearch\client\utils.py", line 84, in _wrapped
return func(*args, params=params, **kwargs)
File "C:\Users\mkumaru\AppData\Local\Programs\Python\Python37\lib\site-packages\elasticsearch\client\__init__.py", line 1498, in bulk
headers={"content-type": "application/x-ndjson"},
File "C:\Users\mkumaru\AppData\Local\Programs\Python\Python37\lib\site-packages\elasticsearch\transport.py", line 353, in perform_request
timeout=timeout,
File "C:\Users\mkumaru\AppData\Local\Programs\Python\Python37\lib\site-packages\elasticsearch\connection\http_urllib3.py", line 239, in perform_request
self._raise_error(response.status, raw_data)
File "C:\Users\mkumaru\AppData\Local\Programs\Python\Python37\lib\site-packages\elasticsearch\connection\base.py", line 168, in _raise_error
status_code, error_message, additional_info
elasticsearch.exceptions.AuthenticationException: AuthenticationException(401, 'Access denied')
I think es should be created like this:
es = Elasticsearch("http://127.0.0.1:9200", http_auth=('user', 'passwd'))
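Separately, helpers.bulk expects an iterable of action dicts rather than a filename, so passing "ldif2.json" directly will not index the file's contents. A rough sketch, assuming ldif2.json contains one JSON document per line and that the target index is called user (both assumptions):
import json
from elasticsearch import Elasticsearch, helpers

es = Elasticsearch("http://127.0.0.1:9200", http_auth=('user', 'passwd'))

def actions_from_file(path, index_name):
    # Yield one bulk action per JSON line in the file.
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line:
                yield {"_index": index_name, "_source": json.loads(line)}

res = helpers.bulk(es, actions_from_file("ldif2.json", "user"), chunk_size=100, request_timeout=200)
print(res)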

How to write a DownloadHandler for scrapy that makes socks4 requests through txsocksx

I am working on a college project, but I need to make the code below work with socks4 instead of tor/socks5. I have tried modifying SOCKS5Agent to SOCKS4Agent, but then I receive an error.
Original code: https://stackoverflow.com/a/33944924/11219616
My code:
import scrapy.core.downloader.handlers.http11 as handler
from twisted.internet import reactor
from txsocksx.http import SOCKS4Agent
from twisted.internet.endpoints import TCP4ClientEndpoint
from scrapy.core.downloader.webclient import _parse

class TorScrapyAgent(handler.ScrapyAgent):
    _Agent = SOCKS4Agent

    def _get_agent(self, request, timeout):
        proxy = request.meta.get('proxy')
        if proxy:
            proxy_scheme, _, proxy_host, proxy_port, _ = _parse(proxy)
            if proxy_scheme == 'socks4':
                endpoint = TCP4ClientEndpoint(reactor, proxy_host, proxy_port)
                return self._Agent(reactor, proxyEndpoint=endpoint)
        return super(TorScrapyAgent, self)._get_agent(request, timeout)

class TorHTTPDownloadHandler(handler.HTTP11DownloadHandler):
    def download_request(self, request, spider):
        agent = TorScrapyAgent(contextFactory=self._contextFactory, pool=self._pool,
                               maxsize=getattr(spider, 'download_maxsize', self._default_maxsize),
                               warnsize=getattr(spider, 'download_warnsize', self._default_warnsize))
        return agent.download_request(request)
I get the error:
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\twisted\internet\defer.py", line 1416, in _inlineCallbacks
result = result.throwExceptionIntoGenerator(g)
File "C:\Python27\lib\site-packages\twisted\python\failure.py", line 491, in throwExceptionIntoGenerator
return g.throw(self.type, self.value, self.tb)
File "C:\Python27\lib\site-packages\scrapy\core\downloader\middleware.py", line 43, in process_request
defer.returnValue((yield download_func(request=request,spider=spider)))
File "C:\Python27\lib\site-packages\ometa\protocol.py", line 53, in dataReceived
self._parser.receive(data)
File "C:\Python27\lib\site-packages\ometa\tube.py", line 41, in receive
status = self._interp.receive(data)
File "C:\Python27\lib\site-packages\ometa\interp.py", line 48, in receive
for x in self.next:
File "C:\Python27\lib\site-packages\ometa\interp.py", line 177, in apply
for x in self._apply(f, ruleName, argvals):
File "C:\Python27\lib\site-packages\ometa\interp.py", line 110, in _apply
for x in rule():
File "C:\Python27\lib\site-packages\ometa\interp.py", line 256, in parse_Or
for x in self._eval(subexpr):
File "C:\Python27\lib\site-packages\ometa\interp.py", line 241, in parse_And
for x in self._eval(subexpr):
File "C:\Python27\lib\site-packages\ometa\interp.py", line 440, in parse_Action
val = eval(expr.data, self.globals, self._localsStack[-1])
File "<string>", line 1, in <module>
File "C:\Python27\lib\site-packages\txsocksx\client.py", line 276, in serverResponse
raise e.socks4ErrorMap.get(status)()
RequestRejectedOrFailed
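For context, a custom handler like this is normally wired into Scrapy through the DOWNLOAD_HANDLERS setting; a brief sketch, where the module path myproject.handlers is only an assumption:
# settings.py -- the module path below is an assumption
DOWNLOAD_HANDLERS = {
    'http': 'myproject.handlers.TorHTTPDownloadHandler',
    'https': 'myproject.handlers.TorHTTPDownloadHandler',
}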

Sometimes empty connection string from `getenv` in module

We have a db_connection.py module which looks like the one below.
from os import getenv
from collections import OrderedDict

import asyncpg
from tornado import gen

from util.utils import custom_exception
import time

db_name = getenv("DB_NAME", "XXX")
db_user = getenv("DB_USER", "YYY")
db_password = getenv("DB_PASSWORD", "ZZZ")
db_host = getenv("DB_HOST", "localhost")
db_port = getenv("DB_PORT", "5432")

db_args = dict(user=db_user, password=db_password,
               database=db_name, host=db_host, port=db_port)

class db_connection(object):
    def __init__(self, **db_args):
        self.db_pool = []
        self.init(**db_args)

    # create a pool ref coroutine where we can get connections from
    # needs user, password, database, host, port
    @gen.coroutine
    def init(self, **db_args):
        self.db_pool = yield asyncpg.create_pool(**db_args)

    @gen.coroutine
    def exit(self):
        yield self.db_pool.close()

    # run a query
    async def run(self, q):
        if not self.db_pool:
            self.init()  # try again
        if not self.db_pool:
            raise custom_exception(reason='Database connection error', status_code=500)
        async with self.db_pool.acquire() as connection:
            ret = await connection.fetch(q)
        # format to match the old data types
        return [OrderedDict(e) for e in ret]

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.exit()
        self.db_pool = []
In the app, we initialize the db_connection object as follows:
from util.db_connection import db_args, db_connection

(...)

if __name__ == "__main__":
    AsyncIOMainLoop().install()

    dbc = db_connection(**db_args)

    # What we have here is a rather basic tornado app
    app = make_app()
    app.listen(port=8080)
    asyncio.get_event_loop().run_forever()
The problem we saw, and this is something I cannot explain, is that the connection string (db_args) is not always set and sometimes appears to be an empty dictionary; asyncpg then falls back and tries to derive connection parameters from the Linux user associated with the container via the OS.
Mar 5 16:59:50 ABC: ERROR:
tornado.application:Future <tornado.concurrent.Future object at 0x7eff6005c320>
exception was never retrieved:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1063, in run
yielded = self.gen.throw(*exc_info)
File "/abc/def/db_connection.py", line 26, in init
self.db_pool = yield asyncpg.create_pool(**db_args)
File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 1055, in run
value = future.result()
File "/usr/local/lib/python3.6/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
Mar 5 16:59:50 ABC: File "<string>", line 4, in raise_exc_info
File "/usr/local/lib/python3.6/site-packages/tornado/gen.py", line 307, in wrapper
yielded = next(result)
File "<string>", line 6, in _wrap_awaitable
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 356, in _async__init__
await first_ch.connect()
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 126, in connect
**self._connect_kwargs)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 1498, in connect
max_cacheable_statement_size=max_cacheable_statement_size)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connect_utils.py", line 296, in _connect
addrs, params, config=_parse_connect_arguments(timeout=timeout, **kwargs)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connect_utils.py", line 242, in _parse_connect_arguments
server_settings=server_settings)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connect_utils.py", line 152, in _parse_connect_dsn_and_args
user=getpass.getuser()
File "/usr/local/lib/python3.6/getpass.py", line 169, in getuser
return pwd.getpwuid(os.getuid())[0]
KeyError: 'getpwuid(): uid not found: 10001'
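The traceback is consistent with that reading: asyncpg only falls back to getpass.getuser() when no user was supplied, and that call fails here because uid 10001 has no passwd entry inside the container. One detail in the module that could produce exactly this is the retry in run(), which calls self.init() without arguments, so init's **db_args is an empty dict. Below is a defensive sketch that fails fast when the env-derived settings come back empty; the check is added purely for illustration and is not part of the original module.
from os import getenv

REQUIRED = {
    "DB_NAME": getenv("DB_NAME", "XXX"),
    "DB_USER": getenv("DB_USER", "YYY"),
    "DB_PASSWORD": getenv("DB_PASSWORD", "ZZZ"),
    "DB_HOST": getenv("DB_HOST", "localhost"),
    "DB_PORT": getenv("DB_PORT", "5432"),
}

missing = [name for name, value in REQUIRED.items() if not value]
if missing:
    # Fail at startup instead of letting asyncpg fall back to
    # getpass.getuser(), which raises KeyError for a uid without a passwd entry.
    raise RuntimeError("Missing database settings: {}".format(", ".join(missing)))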
