Unable to execute commands on a remote MongoDB using pymongo (Python)

I am able to connect to my remote database (after authenticating, of course), but I am not able to execute any commands or even list the collections.
mongo_url = "blah.com:12345"
db_name = "db_name"
db_user_name = "user"
db_password = "password"
mongo_uri = "mongodb://" + db_user_name + ":" + db_password + "@" + mongo_url + "/" + db_name
connection = pymongo.MongoClient(mongo_uri)
db = connection[db_name]
print db.authenticate(db_user_name, db_password)  # Returns True
However, I am not able to use commands such as db.collection_names() or anything issued through db.command().
I get this error stack (saying Authentication failed):
---------------------------------------------------------------------------
OperationFailure Traceback (most recent call last)
<ipython-input-13-1840c0979539> in <module>()
----> 1 db.collection_names()
D:\Continuum\Anaconda2\lib\site-packages\pymongo\database.pyc in collection_names(self, include_system_collections)
515 """
516 with self.__client._socket_for_reads(
--> 517 ReadPreference.PRIMARY) as (sock_info, slave_okay):
518
519 wire_version = sock_info.max_wire_version
D:\Continuum\Anaconda2\lib\contextlib.pyc in __enter__(self)
15 def __enter__(self):
16 try:
---> 17 return self.gen.next()
18 except StopIteration:
19 raise RuntimeError("generator didn't yield")
D:\Continuum\Anaconda2\lib\site-packages\pymongo\mongo_client.pyc in _socket_for_reads(self, read_preference)
796 topology = self._get_topology()
797 single = topology.description.topology_type == TOPOLOGY_TYPE.Single
--> 798 with self._get_socket(read_preference) as sock_info:
799 slave_ok = (single and not sock_info.is_mongos) or (
800 preference != ReadPreference.PRIMARY)
D:\Continuum\Anaconda2\lib\contextlib.pyc in __enter__(self)
15 def __enter__(self):
16 try:
---> 17 return self.gen.next()
18 except StopIteration:
19 raise RuntimeError("generator didn't yield")
D:\Continuum\Anaconda2\lib\site-packages\pymongo\mongo_client.pyc in _get_socket(self, selector)
762 server = self._get_topology().select_server(selector)
763 try:
--> 764 with server.get_socket(self.__all_credentials) as sock_info:
765 yield sock_info
766 except NetworkTimeout:
D:\Continuum\Anaconda2\lib\contextlib.pyc in __enter__(self)
15 def __enter__(self):
16 try:
---> 17 return self.gen.next()
18 except StopIteration:
19 raise RuntimeError("generator didn't yield")
D:\Continuum\Anaconda2\lib\site-packages\pymongo\server.pyc in get_socket(self, all_credentials, checkout)
161 @contextlib.contextmanager
162 def get_socket(self, all_credentials, checkout=False):
--> 163 with self.pool.get_socket(all_credentials, checkout) as sock_info:
164 yield sock_info
165
D:\Continuum\Anaconda2\lib\contextlib.pyc in __enter__(self)
15 def __enter__(self):
16 try:
---> 17 return self.gen.next()
18 except StopIteration:
19 raise RuntimeError("generator didn't yield")
D:\Continuum\Anaconda2\lib\site-packages\pymongo\pool.pyc in get_socket(self, all_credentials, checkout)
582 sock_info = self._get_socket_no_auth()
583 try:
--> 584 sock_info.check_auth(all_credentials)
585 yield sock_info
586 except:
D:\Continuum\Anaconda2\lib\site-packages\pymongo\pool.pyc in check_auth(self, all_credentials)
330
331 for credentials in cached - authset:
--> 332 auth.authenticate(credentials, self)
333 self.authset.add(credentials)
334
D:\Continuum\Anaconda2\lib\site-packages\pymongo\auth.pyc in authenticate(credentials, sock_info)
462 mechanism = credentials.mechanism
463 auth_func = _AUTH_MAP.get(mechanism)
--> 464 auth_func(credentials, sock_info)
465
466
D:\Continuum\Anaconda2\lib\site-packages\pymongo\auth.pyc in _authenticate_default(credentials, sock_info)
442 def _authenticate_default(credentials, sock_info):
443 if sock_info.max_wire_version >= 3:
--> 444 return _authenticate_scram_sha1(credentials, sock_info)
445 else:
446 return _authenticate_mongo_cr(credentials, sock_info)
D:\Continuum\Anaconda2\lib\site-packages\pymongo\auth.pyc in _authenticate_scram_sha1(credentials, sock_info)
226 ('conversationId', res['conversationId']),
227 ('payload', Binary(client_final))])
--> 228 res = sock_info.command(source, cmd)
229
230 parsed = _parse_scram_response(res['payload'])
D:\Continuum\Anaconda2\lib\site-packages\pymongo\pool.pyc in command(self, dbname, spec, slave_ok, read_preference, codec_options, check, allowable_errors, check_keys, read_concern)
237 check, allowable_errors, self.address,
238 check_keys, self.listeners, self.max_bson_size,
--> 239 read_concern)
240 except OperationFailure:
241 raise
D:\Continuum\Anaconda2\lib\site-packages\pymongo\network.pyc in command(sock, dbname, spec, slave_ok, is_mongos, read_preference, codec_options, check, allowable_errors, address, check_keys, listeners, max_bson_size, read_concern)
100 response_doc = unpacked['data'][0]
101 if check:
--> 102 helpers._check_command_response(response_doc, None, allowable_errors)
103 except Exception as exc:
104 if publish:
D:\Continuum\Anaconda2\lib\site-packages\pymongo\helpers.pyc in _check_command_response(response, msg, allowable_errors)
203
204 msg = msg or "%s"
--> 205 raise OperationFailure(msg % errmsg, code, response)
206
207
OperationFailure: Authentication failed.
But I am able to do these operations from my mongo shell.
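For reference, here is a minimal sketch of building the connection URI with percent-encoded credentials and an explicit authSource; the host, credentials, and authSource='admin' are assumptions, and should be adjusted to match the database the user was actually created in:
import pymongo
try:
    from urllib.parse import quote_plus  # Python 3
except ImportError:
    from urllib import quote_plus        # Python 2

# Hypothetical values; replace with your own deployment details.
mongo_url = "blah.com:12345"
db_name = "db_name"

# Percent-encode the credentials and name the database the user was created in
# via authSource (assumed to be "admin" here).
mongo_uri = "mongodb://%s:%s@%s/%s?authSource=admin" % (
    quote_plus("user"), quote_plus("password"), mongo_url, db_name)

connection = pymongo.MongoClient(mongo_uri)
db = connection[db_name]
print(db.collection_names())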

Related

Accessing Trino via Python

I'm trying to access Trino via Python and keep getting the error below. The goal is to query the Trino interface and retrieve data from Oracle. Please advise on how this can be resolved.
HttpError Traceback (most recent call last)
/tmp/ipykernel_166/2315044439.py in <module>
10 )
11 cur = conn.cursor()
---> 12 cur.execute('SELECT * FROM system.runtime.nodes')
13 rows = cur.fetchall()
14 print(cur)
~/.local/lib/python3.9/site-packages/trino/dbapi.py in execute(self, operation, params)
394 else:
395 self._query = trino.client.TrinoQuery(self._request, sql=operation)
--> 396 result = self._query.execute()
397 self._iterator = iter(result)
398 return result
~/.local/lib/python3.9/site-packages/trino/client.py in execute(self, additional_http_headers)
505
506 response = self._request.post(self._sql, additional_http_headers)
--> 507 status = self._request.process(response)
508 self.query_id = status.id
509 self._stats.update({"queryId": self.query_id})
~/.local/lib/python3.9/site-packages/trino/client.py in process(self, http_response)
382 def process(self, http_response) -> TrinoStatus:
383 if not http_response.ok:
--> 384 self.raise_response_error(http_response)
385
386 http_response.encoding = "utf-8"
~/.local/lib/python3.9/site-packages/trino/client.py in raise_response_error(self, http_response)
373 raise exceptions.Http503Error("error 503: service unavailable")
374
--> 375 raise exceptions.HttpError(
376 "error {}{}".format(
377 http_response.status_code,
HttpError: error 401: b'Unknown signing key ID'
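For comparison, here is a minimal sketch of a Trino DB-API connection using HTTP basic authentication; the host, port, user, catalog, and schema below are placeholders, and the 401 above suggests the server rejected the supplied credentials or token rather than the query itself:
import trino
from trino.auth import BasicAuthentication

# Hypothetical connection details; replace with your cluster's values.
conn = trino.dbapi.connect(
    host="trino.example.com",
    port=443,
    user="my_user",
    catalog="oracle",
    schema="my_schema",
    http_scheme="https",
    auth=BasicAuthentication("my_user", "my_password"),
)
cur = conn.cursor()
cur.execute("SELECT * FROM system.runtime.nodes")
rows = cur.fetchall()
print(rows)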

AWS S3 - boto3 LibraryNotFoundError HTTPClientError

I tried to print out the files I have in an S3 bucket. It worked yesterday, but today I get LibraryNotFoundError and HTTPClientError (I have already made sure the boto3 library is installed). I would appreciate any guidance. Thank you.
My code:
import boto3
s3 = boto3.client('s3', aws_access_key_id='my_access_key',aws_secret_access_key='my_secret_key')
bucketname='bucket_name'
s3_client = boto3.resource('s3')
bucket = s3_client.Bucket(bucketname)
for obj in bucket.objects.all():
    key = obj.key
    print(key)
The error message:
---------------------------------------------------------------------------
LibraryNotFoundError Traceback (most recent call last)
~/opt/anaconda3/lib/python3.7/site-packages/botocore/httpsession.py in send(self, request)
261 decode_content=False,
--> 262 chunked=self._chunked(request.headers),
263 )
~/opt/anaconda3/lib/python3.7/site-packages/urllib3/connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
671 headers=headers,
--> 672 chunked=chunked,
673 )
~/opt/anaconda3/lib/python3.7/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
375 try:
--> 376 self._validate_conn(conn)
377 except (SocketTimeout, BaseSSLError) as e:
~/opt/anaconda3/lib/python3.7/site-packages/urllib3/connectionpool.py in _validate_conn(self, conn)
993 if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
--> 994 conn.connect()
995
~/opt/anaconda3/lib/python3.7/site-packages/urllib3/connection.py in connect(self)
359 server_hostname=server_hostname,
--> 360 ssl_context=context,
361 )
~/opt/anaconda3/lib/python3.7/site-packages/snowflake/connector/ssl_wrap_socket.py in ssl_wrap_socket_with_ocsp(*args, **kwargs)
400
--> 401 from .ocsp_asn1crypto import SnowflakeOCSPAsn1Crypto as SFOCSP
402
~/opt/anaconda3/lib/python3.7/site-packages/snowflake/connector/ocsp_asn1crypto.py in <module>
33 use_openssl(libcrypto_path='/usr/lib/libcrypto.35.dylib', libssl_path='/usr/lib/libssl.35.dylib')
---> 34 from oscrypto import asymmetric
35
~/opt/anaconda3/lib/python3.7/site-packages/oscrypto/asymmetric.py in <module>
18 )
---> 19 from ._asymmetric import _unwrap_private_key_info
20 from ._errors import pretty_message
~/opt/anaconda3/lib/python3.7/site-packages/oscrypto/_asymmetric.py in <module>
26
---> 27 from .kdf import pbkdf1, pbkdf2, pkcs12_kdf
28 from .symmetric import (
~/opt/anaconda3/lib/python3.7/site-packages/oscrypto/kdf.py in <module>
8 from . import backend
----> 9 from .util import rand_bytes
10 from ._types import type_name, byte_cls, int_types
~/opt/anaconda3/lib/python3.7/site-packages/oscrypto/util.py in <module>
9 if sys.platform == 'darwin':
---> 10 from ._mac.util import rand_bytes
11 elif sys.platform == 'win32':
~/opt/anaconda3/lib/python3.7/site-packages/oscrypto/_mac/util.py in <module>
10 from ._common_crypto import CommonCrypto, CommonCryptoConst
---> 11 from ._security import Security
12
~/opt/anaconda3/lib/python3.7/site-packages/oscrypto/_mac/_security.py in <module>
8 if ffi() == 'cffi':
----> 9 from ._security_cffi import Security, version_info as osx_version_info
10 from ._core_foundation_cffi import CoreFoundation, CFHelpers
~/opt/anaconda3/lib/python3.7/site-packages/oscrypto/_mac/_security_cffi.py in <module>
238 if not security_path:
--> 239 raise LibraryNotFoundError('The library Security could not be found')
240
LibraryNotFoundError: The library Security could not be found
During handling of the above exception, another exception occurred:
HTTPClientError Traceback (most recent call last)
<ipython-input-6-4a14fd7aca9a> in <module>
3 bucket = s3_client.Bucket(bucketname)
4
----> 5 for obj in bucket.objects.all():
6 key = obj.key
7 print(key)
~/opt/anaconda3/lib/python3.7/site-packages/boto3/resources/collection.py in __iter__(self)
81
82 count = 0
---> 83 for page in self.pages():
84 for item in page:
85 yield item
~/opt/anaconda3/lib/python3.7/site-packages/boto3/resources/collection.py in pages(self)
164 # we start processing and yielding individual items.
165 count = 0
--> 166 for page in pages:
167 page_items = []
168 for item in self._handler(self._parent, params, page):
~/opt/anaconda3/lib/python3.7/site-packages/botocore/paginate.py in __iter__(self)
253 self._inject_starting_params(current_kwargs)
254 while True:
--> 255 response = self._make_request(current_kwargs)
256 parsed = self._extract_parsed_response(response)
257 if first_request:
~/opt/anaconda3/lib/python3.7/site-packages/botocore/paginate.py in _make_request(self, current_kwargs)
330
331 def _make_request(self, current_kwargs):
--> 332 return self._method(**current_kwargs)
333
334 def _extract_parsed_response(self, response):
~/opt/anaconda3/lib/python3.7/site-packages/botocore/client.py in _api_call(self, *args, **kwargs)
355 "%s() only accepts keyword arguments." % py_operation_name)
356 # The "self" in this scope is referring to the BaseClient.
--> 357 return self._make_api_call(operation_name, kwargs)
358
359 _api_call.__name__ = str(py_operation_name)
~/opt/anaconda3/lib/python3.7/site-packages/botocore/client.py in _make_api_call(self, operation_name, api_params)
646 else:
647 http, parsed_response = self._make_request(
--> 648 operation_model, request_dict, request_context)
649
650 self.meta.events.emit(
~/opt/anaconda3/lib/python3.7/site-packages/botocore/client.py in _make_request(self, operation_model, request_dict, request_context)
665 def _make_request(self, operation_model, request_dict, request_context):
666 try:
--> 667 return self._endpoint.make_request(operation_model, request_dict)
668 except Exception as e:
669 self.meta.events.emit(
~/opt/anaconda3/lib/python3.7/site-packages/botocore/endpoint.py in make_request(self, operation_model, request_dict)
100 logger.debug("Making request for %s with params: %s",
101 operation_model, request_dict)
--> 102 return self._send_request(request_dict, operation_model)
103
104 def create_request(self, params, operation_model=None):
~/opt/anaconda3/lib/python3.7/site-packages/botocore/endpoint.py in _send_request(self, request_dict, operation_model)
135 request, operation_model, context)
136 while self._needs_retry(attempts, operation_model, request_dict,
--> 137 success_response, exception):
138 attempts += 1
139 # If there is a stream associated with the request, we need
~/opt/anaconda3/lib/python3.7/site-packages/botocore/endpoint.py in _needs_retry(self, attempts, operation_model, request_dict, response, caught_exception)
229 event_name, response=response, endpoint=self,
230 operation=operation_model, attempts=attempts,
--> 231 caught_exception=caught_exception, request_dict=request_dict)
232 handler_response = first_non_none_response(responses)
233 if handler_response is None:
~/opt/anaconda3/lib/python3.7/site-packages/botocore/hooks.py in emit(self, event_name, **kwargs)
354 def emit(self, event_name, **kwargs):
355 aliased_event_name = self._alias_event_name(event_name)
--> 356 return self._emitter.emit(aliased_event_name, **kwargs)
357
358 def emit_until_response(self, event_name, **kwargs):
~/opt/anaconda3/lib/python3.7/site-packages/botocore/hooks.py in emit(self, event_name, **kwargs)
226 handlers.
227 """
--> 228 return self._emit(event_name, kwargs)
229
230 def emit_until_response(self, event_name, **kwargs):
~/opt/anaconda3/lib/python3.7/site-packages/botocore/hooks.py in _emit(self, event_name, kwargs, stop_on_response)
209 for handler in handlers_to_call:
210 logger.debug('Event %s: calling handler %s', event_name, handler)
--> 211 response = handler(**kwargs)
212 responses.append((handler, response))
213 if stop_on_response and response is not None:
~/opt/anaconda3/lib/python3.7/site-packages/botocore/retryhandler.py in __call__(self, attempts, response, caught_exception, **kwargs)
181
182 """
--> 183 if self._checker(attempts, response, caught_exception):
184 result = self._action(attempts=attempts)
185 logger.debug("Retry needed, action of: %s", result)
~/opt/anaconda3/lib/python3.7/site-packages/botocore/retryhandler.py in __call__(self, attempt_number, response, caught_exception)
249 def __call__(self, attempt_number, response, caught_exception):
250 should_retry = self._should_retry(attempt_number, response,
--> 251 caught_exception)
252 if should_retry:
253 if attempt_number >= self._max_attempts:
~/opt/anaconda3/lib/python3.7/site-packages/botocore/retryhandler.py in _should_retry(self, attempt_number, response, caught_exception)
267 attempt_number < self._max_attempts:
268 try:
--> 269 return self._checker(attempt_number, response, caught_exception)
270 except self._retryable_exceptions as e:
271 logger.debug("retry needed, retryable exception caught: %s",
~/opt/anaconda3/lib/python3.7/site-packages/botocore/retryhandler.py in __call__(self, attempt_number, response, caught_exception)
315 for checker in self._checkers:
316 checker_response = checker(attempt_number, response,
--> 317 caught_exception)
318 if checker_response:
319 return checker_response
~/opt/anaconda3/lib/python3.7/site-packages/botocore/retryhandler.py in __call__(self, attempt_number, response, caught_exception)
221 elif caught_exception is not None:
222 return self._check_caught_exception(
--> 223 attempt_number, caught_exception)
224 else:
225 raise ValueError("Both response and caught_exception are None.")
~/opt/anaconda3/lib/python3.7/site-packages/botocore/retryhandler.py in _check_caught_exception(self, attempt_number, caught_exception)
357 # the MaxAttemptsDecorator is not interested in retrying the exception
358 # then this exception just propogates out past the retry code.
--> 359 raise caught_exception
~/opt/anaconda3/lib/python3.7/site-packages/botocore/endpoint.py in _do_get_response(self, request, operation_model)
198 http_response = first_non_none_response(responses)
199 if http_response is None:
--> 200 http_response = self._send(request)
201 except HTTPClientError as e:
202 return (None, e)
~/opt/anaconda3/lib/python3.7/site-packages/botocore/endpoint.py in _send(self, request)
242
243 def _send(self, request):
--> 244 return self.http_session.send(request)
245
246
~/opt/anaconda3/lib/python3.7/site-packages/botocore/httpsession.py in send(self, request)
296 message = 'Exception received when sending urllib3 HTTP request'
297 logger.debug(message, exc_info=True)
--> 298 raise HTTPClientError(error=e)
HTTPClientError: An HTTP Client raised and unhandled exception: The library Security could not be found
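As an aside, the listing itself can be written against a single client instead of mixing a client and a resource; here is a minimal sketch using a paginator, with placeholder credentials and bucket name (note that the traceback above fails inside the SSL setup pulled in by the snowflake connector, not in the listing logic itself):
import boto3

# Placeholder credentials and bucket name; prefer environment variables or a
# shared credentials file over hard-coding keys.
s3 = boto3.client(
    "s3",
    aws_access_key_id="my_access_key",
    aws_secret_access_key="my_secret_key",
)

# Page through the bucket and print each object key.
paginator = s3.get_paginator("list_objects_v2")
for page in paginator.paginate(Bucket="bucket_name"):
    for obj in page.get("Contents", []):
        print(obj["Key"])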

"Error while extracting" from tensorflow datasets

I want to train a TensorFlow image segmentation model on COCO, and thought I would leverage the dataset builder already included. The download seems to complete, but it crashes while extracting the zip files.
Running TF 2.0.0 in a Jupyter Notebook under a conda environment on 64-bit Windows 10. The Oxford Pet III dataset used in the official image segmentation tutorial works fine.
Below is the error message (my local user name replaced with %user%).
---------------------------------------------------------------------------
OutOfRangeError Traceback (most recent call last)
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\extractor.py in _sync_extract(self, from_path, method, to_path)
88 try:
---> 89 for path, handle in iter_archive(from_path, method):
90 path = tf.compat.as_text(path)
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\extractor.py in iter_zip(arch_f)
176 with _open_or_pass(arch_f) as fobj:
--> 177 z = zipfile.ZipFile(fobj)
178 for member in z.infolist():
~\.conda\envs\tf-tutorial\lib\zipfile.py in __init__(self, file, mode, compression, allowZip64)
1130 if mode == 'r':
-> 1131 self._RealGetContents()
1132 elif mode in ('w', 'x'):
~\.conda\envs\tf-tutorial\lib\zipfile.py in _RealGetContents(self)
1193 try:
-> 1194 endrec = _EndRecData(fp)
1195 except OSError:
~\.conda\envs\tf-tutorial\lib\zipfile.py in _EndRecData(fpin)
263 # Determine file size
--> 264 fpin.seek(0, 2)
265 filesize = fpin.tell()
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\util\deprecation.py in new_func(*args, **kwargs)
506 instructions)
--> 507 return func(*args, **kwargs)
508
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in seek(self, offset, whence, position)
166 elif whence == 2:
--> 167 offset += self.size()
168 else:
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in size(self)
101 """Returns the size of the file."""
--> 102 return stat(self.__name).length
103
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in stat(filename)
726 """
--> 727 return stat_v2(filename)
728
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in stat_v2(path)
743 file_statistics = pywrap_tensorflow.FileStatistics()
--> 744 pywrap_tensorflow.Stat(compat.as_bytes(path), file_statistics)
745 return file_statistics
OutOfRangeError: C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip; Unknown error
During handling of the above exception, another exception occurred:
ExtractError Traceback (most recent call last)
<ipython-input-27-887fa0198611> in <module>
1 cocoBuilder = tfds.builder('coco')
2 info = cocoBuilder.info
----> 3 cocoBuilder.download_and_prepare()
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\api_utils.py in disallow_positional_args_dec(fn, instance, args, kwargs)
50 _check_no_positional(fn, args, ismethod, allowed=allowed)
51 _check_required(fn, kwargs)
---> 52 return fn(*args, **kwargs)
53
54 return disallow_positional_args_dec(wrapped) # pylint: disable=no-value-for-parameter
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in download_and_prepare(self, download_dir, download_config)
285 self._download_and_prepare(
286 dl_manager=dl_manager,
--> 287 download_config=download_config)
288
289 # NOTE: If modifying the lines below to put additional information in
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in _download_and_prepare(self, dl_manager, download_config)
946 super(GeneratorBasedBuilder, self)._download_and_prepare(
947 dl_manager=dl_manager,
--> 948 max_examples_per_split=download_config.max_examples_per_split,
949 )
950
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in _download_and_prepare(self, dl_manager, **prepare_split_kwargs)
802 # Generating data for all splits
803 split_dict = splits_lib.SplitDict()
--> 804 for split_generator in self._split_generators(dl_manager):
805 if splits_lib.Split.ALL == split_generator.split_info.name:
806 raise ValueError(
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\image\coco.py in _split_generators(self, dl_manager)
237 root_url = 'http://images.cocodataset.org/'
238 extracted_paths = dl_manager.download_and_extract({
--> 239 key: root_url + url for key, url in urls.items()
240 })
241
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\download_manager.py in download_and_extract(self, url_or_urls)
357 with self._downloader.tqdm():
358 with self._extractor.tqdm():
--> 359 return _map_promise(self._download_extract, url_or_urls)
360
361 @property
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\download_manager.py in _map_promise(map_fn, all_inputs)
393 """Map the function into each element and resolve the promise."""
394 all_promises = utils.map_nested(map_fn, all_inputs) # Apply the function
--> 395 res = utils.map_nested(_wait_on_promise, all_promises)
396 return res
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in map_nested(function, data_struct, dict_only, map_tuple)
127 return {
128 k: map_nested(function, v, dict_only, map_tuple)
--> 129 for k, v in data_struct.items()
130 }
131 elif not dict_only:
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in <dictcomp>(.0)
127 return {
128 k: map_nested(function, v, dict_only, map_tuple)
--> 129 for k, v in data_struct.items()
130 }
131 elif not dict_only:
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in map_nested(function, data_struct, dict_only, map_tuple)
141 return tuple(mapped)
142 # Singleton
--> 143 return function(data_struct)
144
145
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\download_manager.py in _wait_on_promise(p)
377
378 def _wait_on_promise(p):
--> 379 return p.get()
380
381 else:
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in get(self, timeout)
508 target = self._target()
509 self._wait(timeout or DEFAULT_TIMEOUT)
--> 510 return self._target_settled_value(_raise=True)
511
512 def _target_settled_value(self, _raise=False):
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in _target_settled_value(self, _raise)
512 def _target_settled_value(self, _raise=False):
513 # type: (bool) -> Any
--> 514 return self._target()._settled_value(_raise)
515
516 _value = _reason = _target_settled_value
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in _settled_value(self, _raise)
222 if _raise:
223 raise_val = self._fulfillment_handler0
--> 224 reraise(type(raise_val), raise_val, self._traceback)
225 return self._fulfillment_handler0
226
~\.conda\envs\tf-tutorial\lib\site-packages\six.py in reraise(tp, value, tb)
694 if value.__traceback__ is not tb:
695 raise value.with_traceback(tb)
--> 696 raise value
697 finally:
698 value = None
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in handle_future_result(future)
840 # type: (Any) -> None
841 try:
--> 842 resolve(future.result())
843 except Exception as e:
844 tb = exc_info()[2]
~\.conda\envs\tf-tutorial\lib\concurrent\futures\_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~\.conda\envs\tf-tutorial\lib\concurrent\futures\_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~\.conda\envs\tf-tutorial\lib\concurrent\futures\thread.py in run(self)
54
55 try:
---> 56 result = self.fn(*self.args, **self.kwargs)
57 except BaseException as exc:
58 self.future.set_exception(exc)
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\extractor.py in _sync_extract(self, from_path, method, to_path)
92 except BaseException as err:
93 msg = 'Error while extracting %s to %s : %s' % (from_path, to_path, err)
---> 94 raise ExtractError(msg)
95 # `tf.io.gfile.Rename(overwrite=True)` doesn't work for non empty
96 # directories, so delete destination first, if it already exists.
ExtractError: Error while extracting C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip to C:\Users\%user%\tensorflow_datasets\downloads\extracted\ZIP.images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip : C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip; Unknown error
The message seems cryptic to me. The folder it is trying to extract to does not exist when the notebook is started; it is created by TensorFlow, and only at that line. I have tried deleting it completely and running again, to no effect.
The code that leads to the error is (everything runs fine until the last line):
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow_examples.models.pix2pix import pix2pix
import tensorflow_datasets as tfds
from IPython.display import clear_output
import matplotlib.pyplot as plt
dataset, info = tfds.load('coco', with_info=True)
I also tried breaking the last command down into assigning the tfds.builder object and then running download_and_prepare, and again got the same error.
There is enough disk space: after the download there are still 50+ GB available, while the dataset is supposed to be 37 GB in its largest version (2014).
I had a similar problem with Windows 10 and COCO 2017. My solution was simple: extract the ZIP file manually to the folder path given in the error message, as sketched below.
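A minimal sketch of that manual extraction with Python's zipfile, using the source and destination paths taken from the error message (the %user% placeholder stands in for the local account name):
import zipfile

# Paths copied from the ExtractError message above.
src = r"C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip"
dst = r"C:\Users\%user%\tensorflow_datasets\downloads\extracted\ZIP.images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip"

with zipfile.ZipFile(src) as zf:
    zf.extractall(dst)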

Selenium with Python keeps timing out even after I have manually set a very high timeout value

I am trying to use webdriver.Firefox() in my project. My Firefox version is 54.0 and Selenium version is 3.4.3.
I have tried:
driver = webdriver.Firefox()
but this gives the error message
timeout: timed out
while opening the browser, in less than a second, so this definitely isn't a genuine timeout issue. I have tried manually setting the timeout:
driver = webdriver.Firefox(timeout=100000)
Now it works fine at times, but it sometimes times out while opening the browser, and sometimes times out while making a request even when that request takes very little time, maybe around 2 seconds or less.
Is there an obvious problem here?
This is the full stacktrace
timeout Traceback (most recent call last)
/home/hackerearth/webapps/django/mycareerstack/apps/functional_tests/sprints/test_sprint_registration.py in <module>()
24
25 if __name__ == '__main__':
---> 26 unittest.main()
/usr/lib/python2.7/unittest/main.pyc in __init__(self, module, defaultTest, argv, testRunner, testLoader, exit, verbosity, failfast, catchbreak, buffer)
92 self.testLoader = testLoader
93 self.progName = os.path.basename(argv[0])
---> 94 self.parseArgs(argv)
95 self.runTests()
96
/usr/lib/python2.7/unittest/main.pyc in parseArgs(self, argv)
147 else:
148 self.testNames = (self.defaultTest,)
--> 149 self.createTests()
150 except getopt.error, msg:
151 self.usageExit(msg)
/usr/lib/python2.7/unittest/main.pyc in createTests(self)
153 def createTests(self):
154 if self.testNames is None:
--> 155 self.test = self.testLoader.loadTestsFromModule(self.module)
156 else:
157 self.test = self.testLoader.loadTestsFromNames(self.testNames,
/usr/lib/python2.7/unittest/loader.pyc in loadTestsFromModule(self, module, use_load_tests)
63 obj = getattr(module, name)
64 if isinstance(obj, type) and issubclass(obj, case.TestCase):
---> 65 tests.append(self.loadTestsFromTestCase(obj))
66
67 load_tests = getattr(module, 'load_tests', None)
/usr/lib/python2.7/unittest/loader.pyc in loadTestsFromTestCase(self, testCaseClass)
54 if not testCaseNames and hasattr(testCaseClass, 'runTest'):
55 testCaseNames = ['runTest']
---> 56 loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
57 return loaded_suite
58
/home/hackerearth/webapps/django/mycareerstack/apps/functional_tests/sprints/test_sprint_registration.py in __init__(self, *args, **kwargs)
7
8 def __init__(self, *args, **kwargs):
----> 9 super(RegistrationLandingPageTestCase, self).__init__(*args, **kwargs)
10 user = self.get_user()
11 self.single_phase_hack = self.get_single_phase_hackathon()
/home/hackerearth/webapps/django/mycareerstack/apps/functional_tests/sprints/utils.pyc in __init__(self, *args, **kwargs)
6 def __init__(self, *args, **kwargs):
7 super(SprintFunctionalTestCase, self).__init__(
----> 8 *args, **kwargs)
9 user = self.get_user()
10 self.login(user)
/home/hackerearth/webapps/django/mycareerstack/apps/functional_tests/utils.pyc in __init__(self, *args, **kwargs)
26 def __init__(self, *args, **kwargs):
27 super(HEFunctionalTestCase, self).__init__(*args, **kwargs)
---> 28 self.browser = webdriver.Firefox(timeout=10000)
29 self.homepage = 'http://localhost:8000'
30 self.keys = Keys
/usr/local/lib/python2.7/dist-packages/selenium/webdriver/firefox/webdriver.pyc in __init__(self, firefox_profile, firefox_binary, timeout, capabilities, proxy, executable_path, firefox_options, log_path)
150 command_executor=executor,
151 desired_capabilities=capabilities,
--> 152 keep_alive=True)
153
154 # Selenium remote
/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/webdriver.pyc in __init__(self, command_executor, desired_capabilities, browser_profile, proxy, keep_alive, file_detector)
96 warnings.warn("Please use FirefoxOptions to set browser profile",
97 DeprecationWarning)
---> 98 self.start_session(desired_capabilities, browser_profile)
99 self._switch_to = SwitchTo(self)
100 self._mobile = Mobile(self)
/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/webdriver.pyc in start_session(self, capabilities, browser_profile)
186 parameters = {"capabilities": w3c_caps,
187 "desiredCapabilities": capabilities}
--> 188 response = self.execute(Command.NEW_SESSION, parameters)
189 if 'sessionId' not in response:
190 response = response['value']
/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/webdriver.pyc in execute(self, driver_command, params)
252
253 params = self._wrap_value(params)
--> 254 response = self.command_executor.execute(driver_command, params)
255 if response:
256 self.error_handler.check_response(response)
/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/remote_connection.pyc in execute(self, command, params)
462 path = string.Template(command_info[1]).substitute(params)
463 url = '%s%s' % (self._url, path)
--> 464 return self._request(command_info[0], url, body=data)
465
466 def _request(self, method, url, body=None):
/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/remote_connection.pyc in _request(self, method, url, body)
486 try:
487 self._conn.request(method, parsed_url.path, body, headers)
--> 488 resp = self._conn.getresponse()
489 except (httplib.HTTPException, socket.error):
490 self._conn.close()
/usr/lib/python2.7/httplib.pyc in getresponse(self, buffering)
1134
1135 try:
-> 1136 response.begin()
1137 assert response.will_close != _UNKNOWN
1138 self.__state = _CS_IDLE
/usr/lib/python2.7/httplib.pyc in begin(self)
451 # read until we get a non-100 response
452 while True:
--> 453 version, status, reason = self._read_status()
454 if status != CONTINUE:
455 break
/usr/lib/python2.7/httplib.pyc in _read_status(self)
407 def _read_status(self):
408 # Initialize with Simple-Response defaults
--> 409 line = self.fp.readline(_MAXLINE + 1)
410 if len(line) > _MAXLINE:
411 raise LineTooLong("header line")
/usr/lib/python2.7/socket.pyc in readline(self, size)
478 while True:
479 try:
--> 480 data = self._sock.recv(self._rbufsize)
481 except error, e:
482 if e.args[0] == EINTR:
timeout: timed out
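For context, here is a minimal sketch of starting a Selenium 3.x Firefox session with an explicit geckodriver path and a raised page-load timeout; the binary path and URL are assumptions:
from selenium import webdriver

# Selenium 3.x drives Firefox through geckodriver; the path below is a placeholder.
driver = webdriver.Firefox(executable_path="/usr/local/bin/geckodriver")

# Raise the page-load timeout (in seconds) for slow pages.
driver.set_page_load_timeout(120)

driver.get("http://localhost:8000")
print(driver.title)
driver.quit()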

Create Case query via Django ORM

Using Django 1.8 and Postgres 9.3, I have the following Django model, which contains log lines. I would like to extract all domain_name values that were never allowed access.
class Logs(models.Model):
    date = DateTimeField(db_index=True)
    action = TextField(default='a', null=True, blank=True, db_index=True)
    url = TextField(null=True, blank=True)
    stats = JsonField(null=True, blank=True)
    domain_name = TextField(null=True, blank=True, db_index=True)
    <snip>
This SQL query works nicely, but I'm having trouble translating it to a Django ORM queryset.
select domain_name from reporter_log
GROUP BY domain_name
HAVING COUNT(CASE WHEN action = 'a' OR action = 'ae' then 1 END) = 0;
I would have expected the following queryset would work:
LogLine.objects.annotate(
    allowed=Count(Case(
        When(action='a', then=Value(1)),
        When(action='ae', then=Value(1)),
        default=Value(0),
        output_field=IntegerField(),
    )
))
But I get a traceback:
ProgrammingError Traceback (most recent call last)
<ipython-input-4-feb056328fe8> in <module>()
4 When(action='ae', then=Value(1)),
5 default=Value(0),
----> 6 output_field=IntegerField(),
7 )
8 ))
/usr/lib/python3/dist-packages/IPython/core/displayhook.py in __call__(self, result)
245 self.start_displayhook()
246 self.write_output_prompt()
--> 247 format_dict, md_dict = self.compute_format_data(result)
248 self.write_format_data(format_dict, md_dict)
249 self.update_user_ns(result)
/usr/lib/python3/dist-packages/IPython/core/displayhook.py in compute_format_data(self, result)
155
156 """
--> 157 return self.shell.display_formatter.format(result)
158
159 def write_format_data(self, format_dict, md_dict=None):
/usr/lib/python3/dist-packages/IPython/core/formatters.py in format(self, obj, include, exclude)
150 md = None
151 try:
--> 152 data = formatter(obj)
153 except:
154 # FIXME: log the exception
/usr/lib/python3/dist-packages/IPython/core/formatters.py in __call__(self, obj)
478 type_pprinters=self.type_printers,
479 deferred_pprinters=self.deferred_printers)
--> 480 printer.pretty(obj)
481 printer.flush()
482 return stream.getvalue()
/usr/lib/python3/dist-packages/IPython/lib/pretty.py in pretty(self, obj)
361 if isinstance(meth, collections.Callable):
362 return meth(obj, self, cycle)
--> 363 return _default_pprint(obj, self, cycle)
364 finally:
365 self.end_group()
/usr/lib/python3/dist-packages/IPython/lib/pretty.py in _default_pprint(obj, p, cycle)
481 if getattr(klass, '__repr__', None) not in _baseclass_reprs:
482 # A user-provided repr.
--> 483 p.text(repr(obj))
484 return
485 p.begin_group(1, '<')
/usr/local/lib/python3.4/dist-packages/django/db/models/query.py in __repr__(self)
136
137 def __repr__(self):
--> 138 data = list(self[:REPR_OUTPUT_SIZE + 1])
139 if len(data) > REPR_OUTPUT_SIZE:
140 data[-1] = "...(remaining elements truncated)..."
/usr/local/lib/python3.4/dist-packages/django/db/models/query.py in __iter__(self)
160 - Responsible for turning the rows into model objects.
161 """
--> 162 self._fetch_all()
163 return iter(self._result_cache)
164
/usr/local/lib/python3.4/dist-packages/django/db/models/query.py in _fetch_all(self)
963 def _fetch_all(self):
964 if self._result_cache is None:
--> 965 self._result_cache = list(self.iterator())
966 if self._prefetch_related_lookups and not self._prefetch_done:
967 self._prefetch_related_objects()
/usr/local/lib/python3.4/dist-packages/django/db/models/query.py in iterator(self)
236 # Execute the query. This will also fill compiler.select, klass_info,
237 # and annotations.
--> 238 results = compiler.execute_sql()
239 select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
240 compiler.annotation_col_map)
/usr/local/lib/python3.4/dist-packages/django/db/models/sql/compiler.py in execute_sql(self, result_type)
838 cursor = self.connection.cursor()
839 try:
--> 840 cursor.execute(sql, params)
841 except Exception:
842 cursor.close()
/usr/local/lib/python3.4/dist-packages/django/db/backends/utils.py in execute(self, sql, params)
77 start = time()
78 try:
---> 79 return super(CursorDebugWrapper, self).execute(sql, params)
80 finally:
81 stop = time()
/usr/local/lib/python3.4/dist-packages/django/db/backends/utils.py in execute(self, sql, params)
62 return self.cursor.execute(sql)
63 else:
---> 64 return self.cursor.execute(sql, params)
65
66 def executemany(self, sql, param_list):
/usr/local/lib/python3.4/dist-packages/django/db/utils.py in __exit__(self, exc_type, exc_value, traceback)
95 if dj_exc_type not in (DataError, IntegrityError):
96 self.wrapper.errors_occurred = True
---> 97 six.reraise(dj_exc_type, dj_exc_value, traceback)
98
99 def __call__(self, func):
/usr/local/lib/python3.4/dist-packages/django/utils/six.py in reraise(tp, value, tb)
656 value = tp()
657 if value.__traceback__ is not tb:
--> 658 raise value.with_traceback(tb)
659 raise value
660
/usr/local/lib/python3.4/dist-packages/django/db/backends/utils.py in execute(self, sql, params)
62 return self.cursor.execute(sql)
63 else:
---> 64 return self.cursor.execute(sql, params)
65
66 def executemany(self, sql, param_list):
ProgrammingError: could not identify an equality operator for type json
LINE 1: ...e"."domain_name", "reporter_logs"."rule_type", "reporter_...
^
Here is the SQL generated for the failing query.
[{'sql': 'SELECT "reporter_logs"."id", "reporter_logs"."date", "reporter_logs"."userip", "reporter_logs"."action", "reporter_logs"."url", "reporter_logs"."method", "reporter_logs"."status", "reporter_logs"."mimetype", "reporter_logs"."content_length", "reporter_logs"."pruned", "reporter_logs"."page_title", "reporter_logs"."user_agent", "reporter_logs"."domain_name", "reporter_logs"."rule_type", "reporter_logs"."tally_stats", "reporter_logs"."cat_stats", "reporter_logs"."grade_stats", "reporter_logs"."ignored", "reporter_logs"."category", "reporter_logs"."genre", "reporter_logs"."grade", "reporter_logs"."top_grade", "reporter_logs"."category_confidence", "reporter_logs"."grade_confidence", COUNT(CASE WHEN "reporter_logs"."action" = \'a\' THEN 1 WHEN "reporter_logs"."action" = \'ae\' THEN 1 ELSE 0 END) AS "allowed" FROM "reporter_logs" GROUP BY "reporter_logs"."id", "reporter_logs"."date", "reporter_logs"."userip", "reporter_logs"."action", "reporter_logs"."url", "reporter_logs"."method", "reporter_logs"."status", "reporter_logs"."mimetype", "reporter_logs"."content_length", "reporter_logs"."pruned", "reporter_logs"."page_title", "reporter_logs"."user_agent", "reporter_logs"."domain_name", "reporter_logs"."rule_type", "reporter_logs"."tally_stats", "reporter_logs"."cat_stats", "reporter_logs"."grade_stats", "reporter_logs"."ignored", "reporter_logs"."category", "reporter_logs"."genre", "reporter_logs"."grade", "reporter_logs"."top_grade", "reporter_logs"."category_confidence", "reporter_logs"."grade_confidence" LIMIT 21', 'time': '0.002'}]
Any suggestions would be appreciated. Including suggestions about alternate ways to write the query.
Try not using the Q function; use separate When clauses instead:
Logs.objects.annotate(
    allowed=Count(Case(
        When(action='a', then=1),
        When(action='ae', then=1),
        default=0,
        output_field=IntegerField()
    )
)).values('date')
Well, thanks @knbk and @Gocht for trying. I finally found the solution to my problem. I had to specify that I only wanted the domain_name column, which took care of the traceback.
Also, I had to use Sum instead of Count to return 0 for all domains that had no matches; Count returned 1 for non-matching values.
Logs.objects.values('domain_name').annotate(
    allowed=Sum(Case(
        When(Q(action='a') | Q(action='ae'), then=Value(1)),
        default=Value(0),
        output_field=IntegerField()
    )
))
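To mirror the HAVING COUNT(...) = 0 clause in the original SQL, the annotated queryset can then be filtered on the aggregate; a minimal sketch, assuming the model and imports from the question:
from django.db.models import Case, IntegerField, Q, Sum, Value, When

# Domains that were never allowed: the per-domain Sum of "allowed" rows is 0.
never_allowed = (
    Logs.objects.values('domain_name')
    .annotate(
        allowed=Sum(Case(
            When(Q(action='a') | Q(action='ae'), then=Value(1)),
            default=Value(0),
            output_field=IntegerField(),
        ))
    )
    .filter(allowed=0)
)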
