Why is Pymongo shutting down the connection mid-process? - python

I am running a python script in Jupyter Notebook that connects to a mongo db to process text documents (news articles). The script runs fine for the first few batches of data, but then terminates with the following error:
AutoReconnect: localhost:27017: [WinError 10054] An existing connection was forcibly closed by the remote host (full error below).
I have already tried deleting the mongod.lock file as prescribed here (Pymongo keeps refusing the connection at 27017); however, this hasn't solved the issue.
This is my function (with a few sub-functions as well not included here):
def collect_df_by_source(mongo_collections_dir, source, clean_text=True,
                         filter_year=None, min_year=None):
    """Collect articles for *source* from every collection into a DataFrame.

    NOTE(review): the pasted snippet lost its `def` line and all indentation;
    the signature is reconstructed from the traceback below — confirm the
    default values against the original notebook.

    Parameters:
        mongo_collections_dir: mapping of collection name -> pymongo Collection.
        source: value matched against "file_info.source".
        clean_text: when truthy, run mpd_clean_text() on each article body.
        filter_year: exact publication year to filter on (takes precedence).
        min_year: lower bound on publication year, used when filter_year is falsy.

    Returns:
        pandas.DataFrame with one row per article.
    """
    data_list = []
    for collection in mongo_collections_dir:
        mongo_collection = mongo_collections_dir[collection]
        filter_dict = {"file_info.source": source}
        if filter_year:
            filter_dict["extracted.publication_date.year"] = filter_year
        elif min_year:
            print('filter year not found')  # - note added by dror for debug 24/10/2019
            filter_dict["extracted.publication_date.year"] = {"$gt": min_year}
        # count() is deprecated in modern pymongo; count_documents() is the
        # supported replacement and takes the same filter.
        source_count = mongo_collection.count_documents(filter_dict)
        print("{} articles found in collection {} {}".format(source_count, collection, filter_year))
        # BUG fix: count is an int, so compare with 0, not the string "0"
        # (the original comparison was always False and never skipped).
        if source_count == 0:
            continue
        # The original `if not docs:` check was dead code: a pymongo Cursor is
        # always truthy, so emptiness is already handled by the count above.
        docs = mongo_collection.find(filter_dict, no_cursor_timeout=True)
        try:
            for pos, doc in enumerate(docs):
                if pos % 100000 == 0:
                    print("processed {} articles out of {} from {}".format(pos, source_count, source))
                try:
                    text = doc["body"]["content"]
                except KeyError:
                    # Article has no body — skip it.
                    continue
                # BUG fix: the original assigned the cleaned text back onto the
                # `clean_text` flag itself, so the flag's meaning was destroyed
                # after the first document; use a separate local instead.
                cleaned = mpd_clean_text(text, stop_words) if clean_text else ''
                temp_dir = {"collection": mongo_collection.name,
                            "source": doc["file_info"]["source"],
                            "urn": doc["urn"]}
                try:
                    temp_dir["title"] = doc['body']['head']['hedline']
                except KeyError:
                    pass  # no headline for this article
                temp_dir['unit_text'] = text
                temp_dir['clean text'] = cleaned
                try:
                    temp_dir['publication_date'] = get_dt(doc["extracted"]["publication_date"])
                except KeyError:
                    print('no extracted')
                try:
                    temp_dir['section'] = doc['extracted']['section']
                except KeyError:
                    pass
                try:
                    temp_dir['publication_name'] = doc['extracted']['publication_name']
                except KeyError:
                    pass
                data_list.append(temp_dir)
        finally:
            # no_cursor_timeout=True cursors are never reaped by the server,
            # so close them explicitly even when iteration raises — leaked
            # cursors pile up server-side and can drop the connection (the
            # AutoReconnect reported in this question).
            docs.close()
    return pd.DataFrame(data_list)
Full error:
~\Anaconda3\envs\py35\lib\site-packages\pymongo\pool.py in command(self, dbname, spec, slave_ok, read_preference, codec_options, check, allowable_errors, check_keys, read_concern, write_concern, parse_write_concern_error, collation, session, client, retryable_write, publish_events)
578 use_op_msg=self.op_msg_enabled,
--> 579 unacknowledged=unacknowledged)
580 except OperationFailure:
~\Anaconda3\envs\py35\lib\site-packages\pymongo\network.py in command(sock, dbname, spec, slave_ok, is_mongos, read_preference, codec_options, session, client, check, allowable_errors, address, check_keys, listeners, max_bson_size, read_concern, parse_write_concern_error, collation, compression_ctx, use_op_msg, unacknowledged)
140 else:
--> 141 reply = receive_message(sock, request_id)
142 unpacked_docs = reply.unpack_response(codec_options=codec_options)
~\Anaconda3\envs\py35\lib\site-packages\pymongo\network.py in receive_message(sock, request_id, max_message_size)
172 length, _, response_to, op_code = _UNPACK_HEADER(
--> 173 _receive_data_on_socket(sock, 16))
174 # No request_id for exhaust cursor "getMore".
~\Anaconda3\envs\py35\lib\site-packages\pymongo\network.py in _receive_data_on_socket(sock, length)
231 try:
--> 232 chunk_length = sock.recv_into(mv[bytes_read:])
233 except (IOError, OSError) as exc:
ConnectionResetError: [WinError 10054] An existing connection was forcibly closed by the remote host
During handling of the above exception, another exception occurred:
AutoReconnect Traceback (most recent call last)
<ipython-input-14-37a7effc9859> in <module>
----> 1 collect_df_by_sources(collections_dir, sources, newspapers_df, filter_year='2008')
<ipython-input-8-bbd9e966e95c> in collect_df_by_sources(collections_dir, sources, newspapers_df, return_df, filter_year, min_year, override)
20
21 print("collect articles from: {}, {}".format(source_name, source_id))
---> 22 source_df = collect_df_by_source(collections_dir, source_id, filter_year=filter_year, min_year=min_year)
23 source_df.to_csv(source_path, encoding='utf8')
24 if return_df:
<ipython-input-7-82d129ff329c> in collect_df_by_source(mongo_collections_dir, source, clean_text, filter_year, min_year)
9 print('filter year not found') # - note added by dror for debug 24/10/2019
10 filter_dict["extracted.publication_date.year"] = {"$gt": min_year}
---> 11 source_count = mongo_collection.count(filter_dict)
12 print("{} articles found in collection {} {}".format(source_count, collection, filter_year))
13 if source_count == "0":
~\Anaconda3\envs\py35\lib\site-packages\pymongo\collection.py in count(self, filter, session, **kwargs)
1764 collation = validate_collation_or_none(kwargs.pop('collation', None))
1765 cmd.update(kwargs)
-> 1766 return self._count(cmd, collation, session)
1767
1768 def create_indexes(self, indexes, session=None, **kwargs):
~\Anaconda3\envs\py35\lib\site-packages\pymongo\collection.py in _count(self, cmd, collation, session)
1570 read_concern=self.read_concern,
1571 collation=collation,
-> 1572 session=session)
1573 if res.get("errmsg", "") == "ns missing":
1574 return 0
~\Anaconda3\envs\py35\lib\site-packages\pymongo\collection.py in _command(self, sock_info, command, slave_ok, read_preference, codec_options, check, allowable_errors, read_concern, write_concern, collation, session, retryable_write)
242 session=s,
243 client=self.__database.client,
--> 244 retryable_write=retryable_write)
245
246 def __create(self, options, collation, session):
~\Anaconda3\envs\py35\lib\site-packages\pymongo\pool.py in command(self, dbname, spec, slave_ok, read_preference, codec_options, check, allowable_errors, check_keys, read_concern, write_concern, parse_write_concern_error, collation, session, client, retryable_write, publish_events)
582 # Catch socket.error, KeyboardInterrupt, etc. and close ourselves.
583 except BaseException as error:
--> 584 self._raise_connection_failure(error)
585
586 def send_message(self, message, max_doc_size):
~\Anaconda3\envs\py35\lib\site-packages\pymongo\pool.py in _raise_connection_failure(self, error)
741 self.close()
742 if isinstance(error, socket.error):
--> 743 _raise_connection_failure(self.address, error)
744 else:
745 raise error
~\Anaconda3\envs\py35\lib\site-packages\pymongo\pool.py in _raise_connection_failure(address, error, msg_prefix)
281 raise NetworkTimeout(msg)
282 else:
--> 283 raise AutoReconnect(msg)
284
285
AutoReconnect: localhost:27017: [WinError 10054] An existing connection was forcibly closed by the remote host

Related

Accessing Trino Via Python

I'm trying to access trino via python and I keep getting the error below. This is to access the trino interface and retrieve data from oracle. Please advise on how this can be resolved.
HttpError Traceback (most recent call last)
/tmp/ipykernel_166/2315044439.py in <module>
10 )
11 cur = conn.cursor()
---> 12 cur.execute('SELECT * FROM system.runtime.nodes')
13 rows = cur.fetchall()
14 print(cur)
~/.local/lib/python3.9/site-packages/trino/dbapi.py in execute(self, operation, params)
394 else:
395 self._query = trino.client.TrinoQuery(self._request, sql=operation)
--> 396 result = self._query.execute()
397 self._iterator = iter(result)
398 return result
~/.local/lib/python3.9/site-packages/trino/client.py in execute(self, additional_http_headers)
505
506 response = self._request.post(self._sql, additional_http_headers)
--> 507 status = self._request.process(response)
508 self.query_id = status.id
509 self._stats.update({"queryId": self.query_id})
~/.local/lib/python3.9/site-packages/trino/client.py in process(self, http_response)
382 def process(self, http_response) -> TrinoStatus:
383 if not http_response.ok:
--> 384 self.raise_response_error(http_response)
385
386 http_response.encoding = "utf-8"
~/.local/lib/python3.9/site-packages/trino/client.py in raise_response_error(self, http_response)
373 raise exceptions.Http503Error("error 503: service unavailable")
374
--> 375 raise exceptions.HttpError(
376 "error {}{}".format(
377 http_response.status_code,
HttpError: error 401: b'Unknown signing key ID'

OSError after SFTP successfully started with pysftp

I am using put from pysftp to upload some files to a SFTP server.
I have a Python list filesToUpload_bordet and I am uploading each file x using a for loop within a context manager, as shown below:
# Enter the sftp server and perform the operations.
# (Indentation was lost in the paste; restored here — behavior unchanged.)
with pysftp.Connection(host=hostname,
                       username=username,
                       password=password) as sftp:
    # Create and cd to the directory where the files will be uploaded.
    try:
        sftp.mkdir(f'{lastRun}')
    except OSError:
        # Directory already exists: log a warning and carry on.
        append(line=f"{timeStamp} run {lastRun}: {panel} WARNING: sftp directory /ngs/{lastRun} already exists, so not creating\n", logFile=logFile)
    with sftp.cd(f'/ngs/{lastRun}'):
        for x in filesToUpload_bordet:
            # put the vcf files
            sftp.put(x)
I know that the snippet above works because the upload successfully started. However, after some time I get the following error message on the Python console:
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
/nexus/databases/ngsRunStats_FK/postPipeline/scripts/newStrategy/STEPS_postPipeline_prod.py in <module>
----> 1 sftpUpload(panel)
/nexus/databases/ngsRunStats_FK/postPipeline/scripts/newStrategy/STEPS_postPipeline_prod.py in sftpUpload(panel)
1212 for x in filesToUpload_bordet:
1213 # put the vcf files
---> 1214 sftp.put(x)
1215 sftp.close() # this is probably not necessary as I am working on a context manager
1216 # this message should be sent independently from the coverage check email
~/miniconda3/lib/python3.7/site-packages/pysftp/__init__.py in put(self, localpath, remotepath, callback, confirm, preserve_mtime)
362
363 sftpattrs = self._sftp.put(localpath, remotepath, callback=callback,
--> 364 confirm=confirm)
365 if preserve_mtime:
366 self._sftp.utime(remotepath, times)
~/miniconda3/lib/python3.7/site-packages/paramiko/sftp_client.py in put(self, localpath, remotepath, callback, confirm)
757 file_size = os.stat(localpath).st_size
758 with open(localpath, "rb") as fl:
--> 759 return self.putfo(fl, remotepath, file_size, callback, confirm)
760
761 def getfo(self, remotepath, fl, callback=None):
~/miniconda3/lib/python3.7/site-packages/paramiko/sftp_client.py in putfo(self, fl, remotepath, file_size, callback, confirm)
715 fr.set_pipelined(True)
716 size = self._transfer_with_callback(
--> 717 reader=fl, writer=fr, file_size=file_size, callback=callback
718 )
719 if confirm:
~/miniconda3/lib/python3.7/site-packages/paramiko/sftp_client.py in _transfer_with_callback(self, reader, writer, file_size, callback)
677 while True:
678 data = reader.read(32768)
--> 679 writer.write(data)
680 size += len(data)
681 if len(data) == 0:
~/miniconda3/lib/python3.7/site-packages/paramiko/file.py in write(self, data)
403 raise IOError("File not open for writing")
404 if not (self._flags & self.FLAG_BUFFERED):
--> 405 self._write_all(data)
406 return
407 self._wbuffer.write(data)
~/miniconda3/lib/python3.7/site-packages/paramiko/file.py in _write_all(self, data)
520 # a socket).
521 while len(data) > 0:
--> 522 count = self._write(data)
523 data = data[count:]
524 if self._flags & self.FLAG_APPEND:
~/miniconda3/lib/python3.7/site-packages/paramiko/sftp_file.py in _write(self, data)
206 while len(self._reqs):
207 req = self._reqs.popleft()
--> 208 t, msg = self.sftp._read_response(req)
209 if t != CMD_STATUS:
210 raise SFTPError("Expected status")
~/miniconda3/lib/python3.7/site-packages/paramiko/sftp_client.py in _read_response(self, waitfor)
863 # synchronous
864 if t == CMD_STATUS:
--> 865 self._convert_status(msg)
866 return t, msg
867
~/miniconda3/lib/python3.7/site-packages/paramiko/sftp_client.py in _convert_status(self, msg)
896 raise IOError(errno.EACCES, text)
897 else:
--> 898 raise IOError(text)
899
900 def _adjust_cwd(self, path):
OSError: Failure
Can this be anything other than a time-out issue? I see that my first file was successfully uploaded at 09:08am and I got the error message at 11:49am.
The "Failure" is an error message for SFTP error code 4, returned by the OpenSSH SFTP server for various problems, for which there's no more specific code in the SFTP protocol version 3. While the server should at least return a specific plain-text error message, it fails to do so.
Common reasons you may get the generic "Failure" error message, while uploading are:
Uploading a file to a full filesystem (HDD).
Exceeding a user disk quota.
For details, see SFTP Status/Error Code 4 (Failure).

Accessing Shared Mailbox Using Exchangelib — Python

Trying to Access a Shared Folder using the following code :
# Build credentials and an explicit (non-autodiscover) O365 configuration.
credentials = Credentials(username=user_name, password="secret")
config = Configuration(server='outlook.office365.com', credentials=credentials, auth_type=NTLM)
# BUG fix: the address separator was mangled to '#' by the site; an SMTP
# address uses '@' between the mailbox and the domain.
account = Account(primary_smtp_address='shared_mail@domain.com',
                  credentials=credentials,
                  autodiscover=False,
                  config=config,
                  access_type=DELEGATE)
The above three lines of code work perfectly, but we are unable to get the root;
the following code, account.root.tree() or account.root, throws this error:
KeyError Traceback (most recent call last)
~\anaconda3\lib\site-packages\cached_property.py in __get__(self, obj, cls)
68 # check if the value was computed before the lock was acquired
---> 69 return obj_dict[name]
70
KeyError: 'root'
During handling of the above exception, another exception occurred:
ErrorNonExistentMailbox Traceback (most recent call last)
<ipython-input-46-a90a4f76ca21> in <module>
2 logging.basicConfig(level=logging.DEBUG)
3
----> 4 account.root.tree()
~\anaconda3\lib\site-packages\cached_property.py in __get__(self, obj, cls)
71 except KeyError:
72 # if not, do the calculation and release the lock
---> 73 return obj_dict.setdefault(name, self.func(obj))
74
75
~\anaconda3\lib\site-packages\exchangelib\account.py in root(self)
268 #threaded_cached_property
269 def root(self):
--> 270 return Root.get_distinguished(account=self)
271
272 #threaded_cached_property
~\anaconda3\lib\site-packages\exchangelib\folders\roots.py in get_distinguished(cls, account)
107 return cls.resolve(
108 account=account,
--> 109 folder=cls(account=account, name=cls.DISTINGUISHED_FOLDER_ID, is_distinguished=True)
110 )
111 except ErrorFolderNotFound:
~\anaconda3\lib\site-packages\exchangelib\folders\base.py in resolve(cls, account, folder)
485 def resolve(cls, account, folder):
486 # Resolve a single folder
--> 487 folders = list(FolderCollection(account=account, folders=[folder]).resolve())
488 if not folders:
489 raise ErrorFolderNotFound('Could not find folder %r' % folder)
~\anaconda3\lib\site-packages\exchangelib\folders\collections.py in resolve(self)
254 additional_fields = self.get_folder_fields(target_cls=self._get_target_cls(), is_complex=None)
255 for f in self.__class__(account=self.account, folders=resolveable_folders).get_folders(
--> 256 additional_fields=additional_fields
257 ):
258 yield f
~\anaconda3\lib\site-packages\exchangelib\folders\collections.py in get_folders(self, additional_fields)
317 folders=self.folders,
318 additional_fields=additional_fields,
--> 319 shape=ID_ONLY,
320 ):
321 yield f
~\anaconda3\lib\site-packages\exchangelib\services\get_folder.py in call(self, folders, additional_fields, shape)
32 **dict(
33 additional_fields=additional_fields,
---> 34 shape=shape,
35 )
36 )):
~\anaconda3\lib\site-packages\exchangelib\services\common.py in _pool_requests(self, payload_func, items, **kwargs)
538 for i, chunk in enumerate(chunkify(items, self.chunk_size), start=1):
539 log.debug('Processing %s chunk %s containing %s items', self.__class__.__name__, i, len(chunk))
--> 540 for elem in self._get_elements(payload=payload_func(chunk, **kwargs)):
541 yield elem
542
~\anaconda3\lib\site-packages\exchangelib\services\common.py in _get_elements_in_response(self, response)
401 def _get_elements_in_response(self, response):
402 for msg in response:
--> 403 container_or_exc = self._get_element_container(message=msg, name=self.element_container_name)
404 if isinstance(container_or_exc, (bool, Exception)):
405 yield container_or_exc
~\anaconda3\lib\site-packages\exchangelib\services\common.py in _get_element_container(self, message, response_message, name)
360 # rspclass == 'Error', or 'Success' and not 'NoError'
361 try:
--> 362 raise self._get_exception(code=response_code, text=msg_text, msg_xml=msg_xml)
363 except self.ERRORS_TO_CATCH_IN_RESPONSE as e:
364 return e
ErrorNonExistentMailbox: Mailbox does not exist.
The same code seems to be working here: https://medium.com/@theamazingexposure/accessing-shared-mailbox-using-exchangelib-python-f020e71a96ab
Also checked this thread https://github.com/ecederstrand/exchangelib/issues/391 and tried almost all the solutions but facing the same error.

python-redis:ConnectionError: Error 32 while writing to socket. Broken pipe?

I want to test whether a redis value can support up to 512 MB.
language:python
# (Indentation was lost in the paste; restored here — behavior unchanged.)
import redis

conn = redis.Redis()
# Build a mapping of one million string fields and send it in a single
# HMSET call — this is the oversized payload that triggers the error below.
temp_dict = {}
for i in range(1000000):
    temp_dict.update({str(i): str(i)})
conn.hmset('hash-key', temp_dict)
errors:
---------------------------------------------------------------------------
ConnectionError Traceback (most recent call last)
in ()
----> 1 conn.hmset('hash-key',temp_dict)
/usr/local/lib/python2.7/dist-packages/redis/client.pyc in hmset(self, name, mapping)
2009 for pair in iteritems(mapping):
2010 items.extend(pair)
-> 2011 return self.execute_command('HMSET', name, *items)
2012
2013 def hmget(self, name, keys, *args):
/usr/local/lib/python2.7/dist-packages/redis/client.pyc in execute_command(self, *args, **options)
671 if not connection.retry_on_timeout and isinstance(e, TimeoutError):
672 raise
--> 673 connection.send_command(*args)
674 return self.parse_response(connection, command_name, **options)
675 finally:
/usr/local/lib/python2.7/dist-packages/redis/connection.pyc in send_command(self, *args)
608 def send_command(self, *args):
609 "Pack and send a command to the Redis server"
--> 610 self.send_packed_command(self.pack_command(*args))
611
612 def can_read(self, timeout=0):
/usr/local/lib/python2.7/dist-packages/redis/connection.pyc in send_packed_command(self, command)
601 errmsg = e.args[1]
602 raise ConnectionError("Error %s while writing to socket. %s." %
--> 603 (errno, errmsg))
604 except:
605 self.disconnect()
ConnectionError: Error 104 while writing to socket. Connection reset by peer.
The data inserted in one call is probably too large; it can be split so that only 100,000 entries are sent per hmset call.
import redis

conn = redis.Redis()
# Send the mapping in 10 batches of 100,000 fields each so no single HMSET
# payload is large enough to reset the connection.
for x in range(10):
    temp_dict = {}
    for i in range(100000):
        # BUG fix: offset the key by the batch number. The original used
        # str(i) in every batch, so it rewrote the same 100,000 fields ten
        # times instead of storing 1,000,000 distinct fields.
        key = str(x * 100000 + i)
        temp_dict[key] = key
    conn.hmset('hash-key', temp_dict)

Unable to execute commands in remote mongo using pymongo

I am able to connect to my remote database (after authentication, of course). But I am not able to execute any commands or even list collections.
mongo_url = "blah.com:12345"
db_name = "db_name"
db_user_name = "user"
db_password = "password"
mongo_uri = "mongodb://" + db_user_name + ":" + db_password + "#" + mongo_url + "/" + db_name
connection = pymongo.MongoClient(mongo_uri)
db = connection[db_name]
print db.authenticate(db_user_name, db_password) // Returns True
However I am not able to use commands like :
db.collection_names() or any command using db.command()
I get this error stack (saying Authentication failed):
---------------------------------------------------------------------------
OperationFailure Traceback (most recent call last)
<ipython-input-13-1840c0979539> in <module>()
----> 1 db.collection_names()
D:\Continuum\Anaconda2\lib\site-packages\pymongo\database.pyc in collection_names(self, include_system_collections)
515 """
516 with self.__client._socket_for_reads(
--> 517 ReadPreference.PRIMARY) as (sock_info, slave_okay):
518
519 wire_version = sock_info.max_wire_version
D:\Continuum\Anaconda2\lib\contextlib.pyc in __enter__(self)
15 def __enter__(self):
16 try:
---> 17 return self.gen.next()
18 except StopIteration:
19 raise RuntimeError("generator didn't yield")
D:\Continuum\Anaconda2\lib\site-packages\pymongo\mongo_client.pyc in _socket_for_reads(self, read_preference)
796 topology = self._get_topology()
797 single = topology.description.topology_type == TOPOLOGY_TYPE.Single
--> 798 with self._get_socket(read_preference) as sock_info:
799 slave_ok = (single and not sock_info.is_mongos) or (
800 preference != ReadPreference.PRIMARY)
D:\Continuum\Anaconda2\lib\contextlib.pyc in __enter__(self)
15 def __enter__(self):
16 try:
---> 17 return self.gen.next()
18 except StopIteration:
19 raise RuntimeError("generator didn't yield")
D:\Continuum\Anaconda2\lib\site-packages\pymongo\mongo_client.pyc in _get_socket(self, selector)
762 server = self._get_topology().select_server(selector)
763 try:
--> 764 with server.get_socket(self.__all_credentials) as sock_info:
765 yield sock_info
766 except NetworkTimeout:
D:\Continuum\Anaconda2\lib\contextlib.pyc in __enter__(self)
15 def __enter__(self):
16 try:
---> 17 return self.gen.next()
18 except StopIteration:
19 raise RuntimeError("generator didn't yield")
D:\Continuum\Anaconda2\lib\site-packages\pymongo\server.pyc in get_socket(self, all_credentials, checkout)
161 #contextlib.contextmanager
162 def get_socket(self, all_credentials, checkout=False):
--> 163 with self.pool.get_socket(all_credentials, checkout) as sock_info:
164 yield sock_info
165
D:\Continuum\Anaconda2\lib\contextlib.pyc in __enter__(self)
15 def __enter__(self):
16 try:
---> 17 return self.gen.next()
18 except StopIteration:
19 raise RuntimeError("generator didn't yield")
D:\Continuum\Anaconda2\lib\site-packages\pymongo\pool.pyc in get_socket(self, all_credentials, checkout)
582 sock_info = self._get_socket_no_auth()
583 try:
--> 584 sock_info.check_auth(all_credentials)
585 yield sock_info
586 except:
D:\Continuum\Anaconda2\lib\site-packages\pymongo\pool.pyc in check_auth(self, all_credentials)
330
331 for credentials in cached - authset:
--> 332 auth.authenticate(credentials, self)
333 self.authset.add(credentials)
334
D:\Continuum\Anaconda2\lib\site-packages\pymongo\auth.pyc in authenticate(credentials, sock_info)
462 mechanism = credentials.mechanism
463 auth_func = _AUTH_MAP.get(mechanism)
--> 464 auth_func(credentials, sock_info)
465
466
D:\Continuum\Anaconda2\lib\site-packages\pymongo\auth.pyc in _authenticate_default(credentials, sock_info)
442 def _authenticate_default(credentials, sock_info):
443 if sock_info.max_wire_version >= 3:
--> 444 return _authenticate_scram_sha1(credentials, sock_info)
445 else:
446 return _authenticate_mongo_cr(credentials, sock_info)
D:\Continuum\Anaconda2\lib\site-packages\pymongo\auth.pyc in _authenticate_scram_sha1(credentials, sock_info)
226 ('conversationId', res['conversationId']),
227 ('payload', Binary(client_final))])
--> 228 res = sock_info.command(source, cmd)
229
230 parsed = _parse_scram_response(res['payload'])
D:\Continuum\Anaconda2\lib\site-packages\pymongo\pool.pyc in command(self, dbname, spec, slave_ok, read_preference, codec_options, check, allowable_errors, check_keys, read_concern)
237 check, allowable_errors, self.address,
238 check_keys, self.listeners, self.max_bson_size,
--> 239 read_concern)
240 except OperationFailure:
241 raise
D:\Continuum\Anaconda2\lib\site-packages\pymongo\network.pyc in command(sock, dbname, spec, slave_ok, is_mongos, read_preference, codec_options, check, allowable_errors, address, check_keys, listeners, max_bson_size, read_concern)
100 response_doc = unpacked['data'][0]
101 if check:
--> 102 helpers._check_command_response(response_doc, None, allowable_errors)
103 except Exception as exc:
104 if publish:
D:\Continuum\Anaconda2\lib\site-packages\pymongo\helpers.pyc in _check_command_response(response, msg, allowable_errors)
203
204 msg = msg or "%s"
--> 205 raise OperationFailure(msg % errmsg, code, response)
206
207
OperationFailure: Authentication failed.
But I am able to do these operations from my mongo shell.

Categories

Resources