AWS S3 - boto3 LibraryNotFoundError HTTPClientError - python

I am trying to print out the files I have in an S3 bucket. It worked fine yesterday, but today I get LibraryNotFoundError and HTTPClientError (I have already confirmed that the boto3 library is installed). Any guidance would be appreciated. Thank you.
My code:
import boto3

s3 = boto3.client('s3', aws_access_key_id='my_access_key', aws_secret_access_key='my_secret_key')
bucketname = 'bucket_name'
s3_client = boto3.resource('s3')
bucket = s3_client.Bucket(bucketname)
for obj in bucket.objects.all():
    key = obj.key
    print(key)
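For reference, the same listing can be written with a single boto3 Session so the client and resource share the same credentials. This is a minimal sketch with placeholder values; it does not by itself address the error below:
import boto3

# Placeholder credentials and bucket name, as in the snippet above
session = boto3.Session(
    aws_access_key_id='my_access_key',
    aws_secret_access_key='my_secret_key',
)
s3 = session.resource('s3')
bucket = s3.Bucket('bucket_name')

for obj in bucket.objects.all():
    print(obj.key)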
The error message:
---------------------------------------------------------------------------
LibraryNotFoundError Traceback (most recent call last)
~/opt/anaconda3/lib/python3.7/site-packages/botocore/httpsession.py in send(self, request)
261 decode_content=False,
--> 262 chunked=self._chunked(request.headers),
263 )
~/opt/anaconda3/lib/python3.7/site-packages/urllib3/connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
671 headers=headers,
--> 672 chunked=chunked,
673 )
~/opt/anaconda3/lib/python3.7/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
375 try:
--> 376 self._validate_conn(conn)
377 except (SocketTimeout, BaseSSLError) as e:
~/opt/anaconda3/lib/python3.7/site-packages/urllib3/connectionpool.py in _validate_conn(self, conn)
993 if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
--> 994 conn.connect()
995
~/opt/anaconda3/lib/python3.7/site-packages/urllib3/connection.py in connect(self)
359 server_hostname=server_hostname,
--> 360 ssl_context=context,
361 )
~/opt/anaconda3/lib/python3.7/site-packages/snowflake/connector/ssl_wrap_socket.py in ssl_wrap_socket_with_ocsp(*args, **kwargs)
400
--> 401 from .ocsp_asn1crypto import SnowflakeOCSPAsn1Crypto as SFOCSP
402
~/opt/anaconda3/lib/python3.7/site-packages/snowflake/connector/ocsp_asn1crypto.py in <module>
33 use_openssl(libcrypto_path='/usr/lib/libcrypto.35.dylib', libssl_path='/usr/lib/libssl.35.dylib')
---> 34 from oscrypto import asymmetric
35
~/opt/anaconda3/lib/python3.7/site-packages/oscrypto/asymmetric.py in <module>
18 )
---> 19 from ._asymmetric import _unwrap_private_key_info
20 from ._errors import pretty_message
~/opt/anaconda3/lib/python3.7/site-packages/oscrypto/_asymmetric.py in <module>
26
---> 27 from .kdf import pbkdf1, pbkdf2, pkcs12_kdf
28 from .symmetric import (
~/opt/anaconda3/lib/python3.7/site-packages/oscrypto/kdf.py in <module>
8 from . import backend
----> 9 from .util import rand_bytes
10 from ._types import type_name, byte_cls, int_types
~/opt/anaconda3/lib/python3.7/site-packages/oscrypto/util.py in <module>
9 if sys.platform == 'darwin':
---> 10 from ._mac.util import rand_bytes
11 elif sys.platform == 'win32':
~/opt/anaconda3/lib/python3.7/site-packages/oscrypto/_mac/util.py in <module>
10 from ._common_crypto import CommonCrypto, CommonCryptoConst
---> 11 from ._security import Security
12
~/opt/anaconda3/lib/python3.7/site-packages/oscrypto/_mac/_security.py in <module>
8 if ffi() == 'cffi':
----> 9 from ._security_cffi import Security, version_info as osx_version_info
10 from ._core_foundation_cffi import CoreFoundation, CFHelpers
~/opt/anaconda3/lib/python3.7/site-packages/oscrypto/_mac/_security_cffi.py in <module>
238 if not security_path:
--> 239 raise LibraryNotFoundError('The library Security could not be found')
240
LibraryNotFoundError: The library Security could not be found
During handling of the above exception, another exception occurred:
HTTPClientError Traceback (most recent call last)
<ipython-input-6-4a14fd7aca9a> in <module>
3 bucket = s3_client.Bucket(bucketname)
4
----> 5 for obj in bucket.objects.all():
6 key = obj.key
7 print(key)
~/opt/anaconda3/lib/python3.7/site-packages/boto3/resources/collection.py in __iter__(self)
81
82 count = 0
---> 83 for page in self.pages():
84 for item in page:
85 yield item
~/opt/anaconda3/lib/python3.7/site-packages/boto3/resources/collection.py in pages(self)
164 # we start processing and yielding individual items.
165 count = 0
--> 166 for page in pages:
167 page_items = []
168 for item in self._handler(self._parent, params, page):
~/opt/anaconda3/lib/python3.7/site-packages/botocore/paginate.py in __iter__(self)
253 self._inject_starting_params(current_kwargs)
254 while True:
--> 255 response = self._make_request(current_kwargs)
256 parsed = self._extract_parsed_response(response)
257 if first_request:
~/opt/anaconda3/lib/python3.7/site-packages/botocore/paginate.py in _make_request(self, current_kwargs)
330
331 def _make_request(self, current_kwargs):
--> 332 return self._method(**current_kwargs)
333
334 def _extract_parsed_response(self, response):
~/opt/anaconda3/lib/python3.7/site-packages/botocore/client.py in _api_call(self, *args, **kwargs)
355 "%s() only accepts keyword arguments." % py_operation_name)
356 # The "self" in this scope is referring to the BaseClient.
--> 357 return self._make_api_call(operation_name, kwargs)
358
359 _api_call.__name__ = str(py_operation_name)
~/opt/anaconda3/lib/python3.7/site-packages/botocore/client.py in _make_api_call(self, operation_name, api_params)
646 else:
647 http, parsed_response = self._make_request(
--> 648 operation_model, request_dict, request_context)
649
650 self.meta.events.emit(
~/opt/anaconda3/lib/python3.7/site-packages/botocore/client.py in _make_request(self, operation_model, request_dict, request_context)
665 def _make_request(self, operation_model, request_dict, request_context):
666 try:
--> 667 return self._endpoint.make_request(operation_model, request_dict)
668 except Exception as e:
669 self.meta.events.emit(
~/opt/anaconda3/lib/python3.7/site-packages/botocore/endpoint.py in make_request(self, operation_model, request_dict)
100 logger.debug("Making request for %s with params: %s",
101 operation_model, request_dict)
--> 102 return self._send_request(request_dict, operation_model)
103
104 def create_request(self, params, operation_model=None):
~/opt/anaconda3/lib/python3.7/site-packages/botocore/endpoint.py in _send_request(self, request_dict, operation_model)
135 request, operation_model, context)
136 while self._needs_retry(attempts, operation_model, request_dict,
--> 137 success_response, exception):
138 attempts += 1
139 # If there is a stream associated with the request, we need
~/opt/anaconda3/lib/python3.7/site-packages/botocore/endpoint.py in _needs_retry(self, attempts, operation_model, request_dict, response, caught_exception)
229 event_name, response=response, endpoint=self,
230 operation=operation_model, attempts=attempts,
--> 231 caught_exception=caught_exception, request_dict=request_dict)
232 handler_response = first_non_none_response(responses)
233 if handler_response is None:
~/opt/anaconda3/lib/python3.7/site-packages/botocore/hooks.py in emit(self, event_name, **kwargs)
354 def emit(self, event_name, **kwargs):
355 aliased_event_name = self._alias_event_name(event_name)
--> 356 return self._emitter.emit(aliased_event_name, **kwargs)
357
358 def emit_until_response(self, event_name, **kwargs):
~/opt/anaconda3/lib/python3.7/site-packages/botocore/hooks.py in emit(self, event_name, **kwargs)
226 handlers.
227 """
--> 228 return self._emit(event_name, kwargs)
229
230 def emit_until_response(self, event_name, **kwargs):
~/opt/anaconda3/lib/python3.7/site-packages/botocore/hooks.py in _emit(self, event_name, kwargs, stop_on_response)
209 for handler in handlers_to_call:
210 logger.debug('Event %s: calling handler %s', event_name, handler)
--> 211 response = handler(**kwargs)
212 responses.append((handler, response))
213 if stop_on_response and response is not None:
~/opt/anaconda3/lib/python3.7/site-packages/botocore/retryhandler.py in __call__(self, attempts, response, caught_exception, **kwargs)
181
182 """
--> 183 if self._checker(attempts, response, caught_exception):
184 result = self._action(attempts=attempts)
185 logger.debug("Retry needed, action of: %s", result)
~/opt/anaconda3/lib/python3.7/site-packages/botocore/retryhandler.py in __call__(self, attempt_number, response, caught_exception)
249 def __call__(self, attempt_number, response, caught_exception):
250 should_retry = self._should_retry(attempt_number, response,
--> 251 caught_exception)
252 if should_retry:
253 if attempt_number >= self._max_attempts:
~/opt/anaconda3/lib/python3.7/site-packages/botocore/retryhandler.py in _should_retry(self, attempt_number, response, caught_exception)
267 attempt_number < self._max_attempts:
268 try:
--> 269 return self._checker(attempt_number, response, caught_exception)
270 except self._retryable_exceptions as e:
271 logger.debug("retry needed, retryable exception caught: %s",
~/opt/anaconda3/lib/python3.7/site-packages/botocore/retryhandler.py in __call__(self, attempt_number, response, caught_exception)
315 for checker in self._checkers:
316 checker_response = checker(attempt_number, response,
--> 317 caught_exception)
318 if checker_response:
319 return checker_response
~/opt/anaconda3/lib/python3.7/site-packages/botocore/retryhandler.py in __call__(self, attempt_number, response, caught_exception)
221 elif caught_exception is not None:
222 return self._check_caught_exception(
--> 223 attempt_number, caught_exception)
224 else:
225 raise ValueError("Both response and caught_exception are None.")
~/opt/anaconda3/lib/python3.7/site-packages/botocore/retryhandler.py in _check_caught_exception(self, attempt_number, caught_exception)
357 # the MaxAttemptsDecorator is not interested in retrying the exception
358 # then this exception just propogates out past the retry code.
--> 359 raise caught_exception
~/opt/anaconda3/lib/python3.7/site-packages/botocore/endpoint.py in _do_get_response(self, request, operation_model)
198 http_response = first_non_none_response(responses)
199 if http_response is None:
--> 200 http_response = self._send(request)
201 except HTTPClientError as e:
202 return (None, e)
~/opt/anaconda3/lib/python3.7/site-packages/botocore/endpoint.py in _send(self, request)
242
243 def _send(self, request):
--> 244 return self.http_session.send(request)
245
246
~/opt/anaconda3/lib/python3.7/site-packages/botocore/httpsession.py in send(self, request)
296 message = 'Exception received when sending urllib3 HTTP request'
297 logger.debug(message, exc_info=True)
--> 298 raise HTTPClientError(error=e)
HTTPClientError: An HTTP Client raised and unhandled exception: The library Security could not be found

Related

Error while connecting to snowflake via python "UnpicklingError: invalid load key, '\x00'"

I am getting the following error while connecting to Snowflake via Python using snowflake.connector.connect:
import os
import snowflake.connector  # pip install snowflake-connector-python

# I am getting the env values from a .env file stored locally
db = 'DB_SANDBOX'
schema = 'PUBLIC'
cnx = snowflake.connector.connect(
    user=os.getenv('USER'),
    password=os.getenv('PASSWORD'),
    account=os.getenv('ACCOUNT'),
    warehouse=os.getenv('WAREHOUSE'),
    database=db,
    schema=schema,
)
This was working fine until today, when my system crashed while running a Python script and I had to hard reboot. I have tried many things, like deleting Python, Anaconda, and all related files in the Users folder and reinstalling a fresh Anaconda version, but I still get the same error. The complete error message is below. Help appreciated.
I even tried hardcoding the username, password, and the rest as variables, but I still get the same error, so the error has nothing to do with the .env file.
---------------------------------------------------------------------------
UnpicklingError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_18464\4059644960.py in <module>
23 db='DB_SANDBOX'
24 schema='PUBLIC'
---> 25 cnx = snowflake.connector.connect(user=os.getenv('USER'),password=os.getenv('PASSWORD'),account=os.getenv('ACCOUNT'),warehouse=os.getenv('WAREHOUSE'), database=db,schema=schema )
26
27 query ='''
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\__init__.py in Connect(**kwargs)
49
50 def Connect(**kwargs) -> SnowflakeConnection:
---> 51 return SnowflakeConnection(**kwargs)
52
53
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\connection.py in __init__(self, **kwargs)
295 self.converter = None
296 self.__set_error_attributes()
--> 297 self.connect(**kwargs)
298 self._telemetry = TelemetryClient(self._rest)
299
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\connection.py in connect(self, **kwargs)
548 connection_diag.generate_report()
549 else:
--> 550 self.__open_connection()
551
552 def close(self, retry=True):
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\connection.py in __open_connection(self)
787 auth = Auth(self.rest)
788 auth.read_temporary_credentials(self.host, self.user, self._session_parameters)
--> 789 self._authenticate(auth_instance)
790
791 self._password = None # ensure password won't persist
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\connection.py in _authenticate(self, auth_instance)
1050 # make some changes if needed before real __authenticate
1051 try:
-> 1052 self.__authenticate(self.__preprocess_auth_instance(auth_instance))
1053 except ReauthenticationRequest as ex:
1054 # cached id_token expiration error, we have cleaned id_token and try to authenticate again
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\connection.py in __authenticate(self, auth_instance)
1070 auth = Auth(self.rest)
1071 try:
-> 1072 auth.authenticate(
1073 auth_instance=auth_instance,
1074 account=self.account,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\auth.py in authenticate(self, auth_instance, account, user, database, schema, warehouse, role, passcode, passcode_in_password, mfa_callback, password_callback, session_parameters, timeout)
255
256 try:
--> 257 ret = self._rest._post_request(
258 url,
259 headers,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\network.py in _post_request(self, url, headers, body, token, timeout, _no_results, no_retry, socket_timeout, _include_retry_params)
702 pprint(ret)
703
--> 704 ret = self.fetch(
705 "post",
706 full_url,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\network.py in fetch(self, method, full_url, headers, data, timeout, **kwargs)
792 retry_ctx = RetryCtx(timeout, include_retry_params)
793 while True:
--> 794 ret = self._request_exec_wrapper(
795 session, method, full_url, headers, data, retry_ctx, **kwargs
796 )
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\network.py in _request_exec_wrapper(self, session, method, full_url, headers, data, retry_ctx, no_retry, token, **kwargs)
915 except Exception as e:
916 if not no_retry:
--> 917 raise e
918 logger.debug("Ignored error", exc_info=True)
919 return {}
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\network.py in _request_exec_wrapper(self, session, method, full_url, headers, data, retry_ctx, no_retry, token, **kwargs)
835 full_url = SnowflakeRestful.add_request_guid(full_url)
836 try:
--> 837 return_object = self._request_exec(
838 session=session,
839 method=method,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\network.py in _request_exec(self, session, method, full_url, headers, data, token, catch_okta_unauthorized_error, is_raw_text, is_raw_binary, binary_data_handler, socket_timeout)
1114 stack_trace=traceback.format_exc(),
1115 )
-> 1116 raise err
1117
1118 def make_requests_session(self):
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\network.py in _request_exec(self, session, method, full_url, headers, data, token, catch_okta_unauthorized_error, is_raw_text, is_raw_binary, binary_data_handler, socket_timeout)
1016 # the response within the time. If not, ConnectReadTimeout or
1017 # ReadTimeout is raised.
-> 1018 raw_ret = session.request(
1019 method=method,
1020 url=full_url,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\vendored\requests\sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
585 }
586 send_kwargs.update(settings)
--> 587 resp = self.send(prep, **send_kwargs)
588
589 return resp
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\vendored\requests\sessions.py in send(self, request, **kwargs)
699
700 # Send the request
--> 701 r = adapter.send(request, **kwargs)
702
703 # Total elapsed time of the request (approximately)
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\vendored\requests\adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
487 try:
488 if not chunked:
--> 489 resp = conn.urlopen(
490 method=request.method,
491 url=url,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\vendored\urllib3\connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
701
702 # Make the request on the httplib connection object.
--> 703 httplib_response = self._make_request(
704 conn,
705 method,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\vendored\urllib3\connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
384 # Trigger any extra validation we need to do.
385 try:
--> 386 self._validate_conn(conn)
387 except (SocketTimeout, BaseSSLError) as e:
388 # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\vendored\urllib3\connectionpool.py in _validate_conn(self, conn)
1040 # Force connect early to allow us to validate the connection.
1041 if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
-> 1042 conn.connect()
1043
1044 if not conn.is_verified:
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\vendored\urllib3\connection.py in connect(self)
412 context.load_default_certs()
413
--> 414 self.sock = ssl_wrap_socket(
415 sock=conn,
416 keyfile=self.key_file,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\ssl_wrap_socket.py in ssl_wrap_socket_with_ocsp(*args, **kwargs)
76 ret = ssl_.ssl_wrap_socket(*args, **kwargs)
77
---> 78 from .ocsp_asn1crypto import SnowflakeOCSPAsn1Crypto as SFOCSP
79
80 log.debug(
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\ocsp_asn1crypto.py in <module>
45 )
46 from snowflake.connector.errors import RevocationCheckError
---> 47 from snowflake.connector.ocsp_snowflake import SnowflakeOCSP, generate_cache_key
48
49 with warnings.catch_warnings():
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\ocsp_snowflake.py in <module>
79 tuple[bytes, bytes, bytes],
80 OCSPResponseValidationResult,
---> 81 ] = SFDictFileCache(
82 entry_lifetime=constants.DAY_IN_SECONDS,
83 file_path={
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\cache.py in __init__(self, file_path, entry_lifetime, file_timeout)
404 self.last_loaded: datetime.datetime | None = None
405 if os.path.exists(self.file_path):
--> 406 self._load()
407
408 def _getitem_non_locking(
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\cache.py in _load(self)
485 try:
486 with open(self.file_path, "rb") as r_file:
--> 487 other = pickle.load(r_file)
488 self._update(
489 other,
UnpicklingError: invalid load key, '\x00'.
This is probably a corrupted cache, which you should try deleting. The default cache directories are documented here. On Windows the default location for the cache is:
%USERPROFILE%\AppData\Local\Snowflake\Caches
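A minimal sketch of clearing that cache from Python, assuming the default Windows location mentioned above (adjust the path on other platforms):
import os
from pathlib import Path

# Assumption: the default Windows cache location from the answer above.
cache_dir = Path(os.environ['USERPROFILE']) / 'AppData' / 'Local' / 'Snowflake' / 'Caches'

if cache_dir.exists():
    for cache_file in cache_dir.iterdir():
        if cache_file.is_file():
            print('Removing', cache_file)
            cache_file.unlink()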

trino-python-client oauth in jupyterlab

I have installed trino-python-client in a custom JupyterLab image, and it works if I use basic authentication.
I want to use the OAuth service I have set up in Trino to make things simpler for users.
This also works fine in ipython3 (see below), but it doesn't work in JupyterLab: JupyterLab hides the redirect URL and never prints it out.
In ipython3 I see this:
In [1]: import trino
...: conn = trino.dbapi.connect(
...: host='trino.somedomain.net',
...: port=443,
...: user='first.last',
...: catalog='iceberg',
...: schema='ds_scratch',
...: http_scheme='https',
...: auth=trino.auth.OAuth2Authentication(),
...: )
...: cur = conn.cursor()
In [2]: cur.execute('SELECT * FROM system.runtime.nodes')
Open the following URL in the browser for the external authentication:
https://trino.somedomain.net/oauth2/token/initiate/042f6e4167d4e6a3f70068ec4389037c2b9c34f3ec356ddc5522a3e13e179fd9
In [3]: rows = cur.fetchall()
In [4]: print(rows)
[['trino-coordinator-69dffc6f9f-pvpg4',...]]
I can see the redirection URL and complete the OAuth flow and everything works.
But in JupyterLab it never prints the URL. How do I change this?
In JupyterLab I see the following.
In [1]: import trino
In [2]: conn = trino.dbapi.connect(
host='trino.somedomain.net',
port=443,
user='first.last',
catalog='iceberg',
schema='ds_scratch',
http_scheme='https',
auth=trino.auth.OAuth2Authentication(),
)
In [3]: cur = conn.cursor()
In [4]: cur.execute('show tables from iceberg.ds_scratch')
---------------------------------------------------------------------------
KeyboardInterrupt Traceback (most recent call last)
/tmp/ipykernel_170/588296735.py in <module>
----> 1 cur.execute('show tables from iceberg.ds_scratch')
/opt/conda/lib/python3.9/site-packages/trino/dbapi.py in execute(self, operation, params)
434 else:
435 self._query = trino.client.TrinoQuery(self._request, sql=operation)
--> 436 result = self._query.execute()
437 self._iterator = iter(result)
438 return result
/opt/conda/lib/python3.9/site-packages/trino/client.py in execute(self, additional_http_headers)
523 raise exceptions.TrinoUserError("Query has been cancelled", self.query_id)
524
--> 525 response = self._request.post(self._sql, additional_http_headers)
526 status = self._request.process(response)
527 self._info_uri = status.info_uri
/opt/conda/lib/python3.9/site-packages/trino/client.py in post(self, sql, additional_http_headers)
337 http_headers.update(additional_http_headers or {})
338
--> 339 http_response = self._post(
340 self.statement_url,
341 data=data,
/opt/conda/lib/python3.9/site-packages/trino/exceptions.py in decorated(*args, **kwargs)
121 for attempt in range(1, max_attempts + 1):
122 try:
--> 123 result = func(*args, **kwargs)
124 if any(guard(result) for guard in conditions):
125 handle_retry.retry(func, args, kwargs, None, attempt)
/opt/conda/lib/python3.9/site-packages/requests/sessions.py in post(self, url, data, json, **kwargs)
588 """
589
--> 590 return self.request('POST', url, data=data, json=json, **kwargs)
591
592 def put(self, url, data=None, **kwargs):
/opt/conda/lib/python3.9/site-packages/requests/sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
540 }
541 send_kwargs.update(settings)
--> 542 resp = self.send(prep, **send_kwargs)
543
544 return resp
/opt/conda/lib/python3.9/site-packages/requests/sessions.py in send(self, request, **kwargs)
653
654 # Send the request
--> 655 r = adapter.send(request, **kwargs)
656
657 # Total elapsed time of the request (approximately)
/opt/conda/lib/python3.9/site-packages/requests/adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
437 try:
438 if not chunked:
--> 439 resp = conn.urlopen(
440 method=request.method,
441 url=url,
/opt/conda/lib/python3.9/site-packages/urllib3/connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
668
669 # Make the request on the httplib connection object.
--> 670 httplib_response = self._make_request(
671 conn,
672 method,
/opt/conda/lib/python3.9/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
379 # Trigger any extra validation we need to do.
380 try:
--> 381 self._validate_conn(conn)
382 except (SocketTimeout, BaseSSLError) as e:
383 # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
/opt/conda/lib/python3.9/site-packages/urllib3/connectionpool.py in _validate_conn(self, conn)
976 # Force connect early to allow us to validate the connection.
977 if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
--> 978 conn.connect()
979
980 if not conn.is_verified:
/opt/conda/lib/python3.9/site-packages/urllib3/connection.py in connect(self)
307 def connect(self):
308 # Add certificate verification
--> 309 conn = self._new_conn()
310 hostname = self.host
311
/opt/conda/lib/python3.9/site-packages/urllib3/connection.py in _new_conn(self)
157
158 try:
--> 159 conn = connection.create_connection(
160 (self._dns_host, self.port), self.timeout, **extra_kw
161 )
/opt/conda/lib/python3.9/site-packages/urllib3/util/connection.py in create_connection(address, timeout, source_address, socket_options)
72 if source_address:
73 sock.bind(source_address)
---> 74 sock.connect(sa)
75 return sock
76
KeyboardInterrupt:
In [5]: rows = cur.fetchall()
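One possible workaround, assuming the installed trino-python-client version lets you pass a custom redirect handler to OAuth2Authentication (check the signature of trino.auth.OAuth2Authentication in your version; the redirect_auth_url_handler parameter is an assumption here), is to force the URL into the notebook's cell output with your own callable:
import sys
import trino

def print_redirect_url(url):
    # Hypothetical handler: print the authentication URL into the
    # notebook's output instead of relying on the default behaviour.
    print("Open this URL in a browser to authenticate:", url, flush=True)
    sys.stdout.flush()

conn = trino.dbapi.connect(
    host='trino.somedomain.net',
    port=443,
    user='first.last',
    catalog='iceberg',
    schema='ds_scratch',
    http_scheme='https',
    # Assumption: this keyword is accepted by the installed client version.
    auth=trino.auth.OAuth2Authentication(redirect_auth_url_handler=print_redirect_url),
)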

Trying to download specific type of file from S3 using Boto3 - TypeError: expected string or bytes-like object

I'm trying to download only the most recent .csv files from my S3 bucket and am running into an error that says "TypeError: expected string or bytes-like object."
I currently have working code that identifies the last modified S3 objects, sorts these objects, and puts them into a list named latest_files.
import boto3

session = boto3.Session()
s3_resource = boto3.resource('s3')
my_bucket = s3_resource.Bucket('chansbucket')

get_last_modified = lambda obj: int(obj.last_modified.strftime('%s'))

unsorted = []
# filters through the bucket and appends objects to the unsorted list
for file in my_bucket.objects.filter():
    unsorted.append(file)
    # sorts last five files in unsorted by last modified time
    latest_files = [obj.key for obj in sorted(unsorted, key=get_last_modified, reverse=True)][0:5]
Now I want to loop through latest_files and download only those that end with .csv.
for file in latest_files:
    if file.endswith('.csv'):
        s3_resource.meta.client.download_file(my_bucket, file, '/Users/mikechan/projects/TT_product_analyses/raw_csv_files/' + file)
Here's where I get the error TypeError: expected string or bytes-like object
Here's the traceback:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-27-ca90c5ad9c53> in <module>()
1 for file in latest_files:
2 if file.endswith('.csv'):
----> 3 s3_resource.meta.client.download_file(my_bucket, str(file), '/Users/mikechan/projects/TT_product_analyses/raw_csv_files/' + str(file))
4
5
~/anaconda/lib/python3.6/site-packages/boto3/s3/inject.py in download_file(self, Bucket, Key, Filename, ExtraArgs, Callback, Config)
170 return transfer.download_file(
171 bucket=Bucket, key=Key, filename=Filename,
--> 172 extra_args=ExtraArgs, callback=Callback)
173
174
~/anaconda/lib/python3.6/site-packages/boto3/s3/transfer.py in download_file(self, bucket, key, filename, extra_args, callback)
305 bucket, key, filename, extra_args, subscribers)
306 try:
--> 307 future.result()
308 # This is for backwards compatibility where when retries are
309 # exceeded we need to throw the same error from boto3 instead of
~/anaconda/lib/python3.6/site-packages/s3transfer/futures.py in result(self)
71 # however if a KeyboardInterrupt is raised we want want to exit
72 # out of this and propogate the exception.
---> 73 return self._coordinator.result()
74 except KeyboardInterrupt as e:
75 self.cancel()
~/anaconda/lib/python3.6/site-packages/s3transfer/futures.py in result(self)
231 # final result.
232 if self._exception:
--> 233 raise self._exception
234 return self._result
235
~/anaconda/lib/python3.6/site-packages/s3transfer/tasks.py in _main(self, transfer_future, **kwargs)
253 # Call the submit method to start submitting tasks to execute the
254 # transfer.
--> 255 self._submit(transfer_future=transfer_future, **kwargs)
256 except BaseException as e:
257 # If there was an exception raised during the submission of task
~/anaconda/lib/python3.6/site-packages/s3transfer/download.py in _submit(self, client, config, osutil, request_executor, io_executor, transfer_future, bandwidth_limiter)
351 Bucket=transfer_future.meta.call_args.bucket,
352 Key=transfer_future.meta.call_args.key,
--> 353 **transfer_future.meta.call_args.extra_args
354 )
355 transfer_future.meta.provide_transfer_size(
~/.local/lib/python3.6/site-packages/botocore/client.py in _api_call(self, *args, **kwargs)
318 "%s() only accepts keyword arguments." % py_operation_name)
319 # The "self" in this scope is referring to the BaseClient.
--> 320 return self._make_api_call(operation_name, kwargs)
321
322 _api_call.__name__ = str(py_operation_name)
~/.local/lib/python3.6/site-packages/botocore/client.py in _make_api_call(self, operation_name, api_params)
594 }
595 request_dict = self._convert_to_request_dict(
--> 596 api_params, operation_model, context=request_context)
597
598 service_id = self._service_model.service_id.hyphenize()
~/.local/lib/python3.6/site-packages/botocore/client.py in _convert_to_request_dict(self, api_params, operation_model, context)
628 context=None):
629 api_params = self._emit_api_params(
--> 630 api_params, operation_model, context)
631 request_dict = self._serializer.serialize_to_request(
632 api_params, operation_model)
~/.local/lib/python3.6/site-packages/botocore/client.py in _emit_api_params(self, api_params, operation_model, context)
658 service_id=service_id,
659 operation_name=operation_name),
--> 660 params=api_params, model=operation_model, context=context)
661 return api_params
662
~/.local/lib/python3.6/site-packages/botocore/hooks.py in emit(self, event_name, **kwargs)
354 def emit(self, event_name, **kwargs):
355 aliased_event_name = self._alias_event_name(event_name)
--> 356 return self._emitter.emit(aliased_event_name, **kwargs)
357
358 def emit_until_response(self, event_name, **kwargs):
~/.local/lib/python3.6/site-packages/botocore/hooks.py in emit(self, event_name, **kwargs)
226 handlers.
227 """
--> 228 return self._emit(event_name, kwargs)
229
230 def emit_until_response(self, event_name, **kwargs):
~/.local/lib/python3.6/site-packages/botocore/hooks.py in _emit(self, event_name, kwargs, stop_on_response)
209 for handler in handlers_to_call:
210 logger.debug('Event %s: calling handler %s', event_name, handler)
--> 211 response = handler(**kwargs)
212 responses.append((handler, response))
213 if stop_on_response and response is not None:
~/.local/lib/python3.6/site-packages/botocore/handlers.py in validate_bucket_name(params, **kwargs)
216 return
217 bucket = params['Bucket']
--> 218 if VALID_BUCKET.search(bucket) is None:
219 error_msg = (
220 'Invalid bucket name "%s": Bucket name must match '
TypeError: expected string or bytes-like object
Can you help? I feel like it's something pretty simple, but I'm a total noob and have been banging my head against my desk forever on this. Any help is appreciated.
Thanks!
The issue with this line:
s3_resource.meta.client.download_file(my_bucket, file, '/Users/mikechan/projects/TT_product_analyses/raw_csv_files/' + file)
is that
my_bucket = s3_resource.Bucket('chansbucket')
is returning a Bucket object while download_file() just wants a bucket name as a string, such as:
s3.meta.client.download_file('mybucket', 'hello.txt', '/tmp/hello.txt')
Also, I think that the latest_files =... line should not be indented.
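Putting that together, a sketch of the corrected loop, with the bucket name passed as a string (the destination path is the one from the question):
for file in latest_files:
    if file.endswith('.csv'):
        s3_resource.meta.client.download_file(
            'chansbucket',   # bucket name as a string, not the Bucket object
            file,            # object key
            '/Users/mikechan/projects/TT_product_analyses/raw_csv_files/' + file,
        )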

Selenium with python keeps on getting timedout even after I have manually set a really high timeout value

I am trying to use webdriver.Firefox() in my project. My Firefox version is 54.0 and my Selenium version is 3.4.3.
I have tried:
driver = webdriver.Firefox()
but this gives an error message
timeout: timed out
while opening the browser in less than a second, so this definitely isn't a genuine timeout. I have tried manually setting the timeout:
driver = webdriver.Firefox(timeout=100000)
Now it works fine at times, but it sometimes times out while opening the browser and sometimes while making a request, even when that request took very little time, maybe around 2 seconds or less.
Is there an obvious problem here?
This is the full stack trace:
timeout Traceback (most recent call last)
/home/hackerearth/webapps/django/mycareerstack/apps/functional_tests/sprints/test_sprint_registration.py in <module>()
24
25 if __name__ == '__main__':
---> 26 unittest.main()
/usr/lib/python2.7/unittest/main.pyc in __init__(self, module, defaultTest, argv, testRunner, testLoader, exit, verbosity, failfast, catchbreak, buffer)
92 self.testLoader = testLoader
93 self.progName = os.path.basename(argv[0])
---> 94 self.parseArgs(argv)
95 self.runTests()
96
/usr/lib/python2.7/unittest/main.pyc in parseArgs(self, argv)
147 else:
148 self.testNames = (self.defaultTest,)
--> 149 self.createTests()
150 except getopt.error, msg:
151 self.usageExit(msg)
/usr/lib/python2.7/unittest/main.pyc in createTests(self)
153 def createTests(self):
154 if self.testNames is None:
--> 155 self.test = self.testLoader.loadTestsFromModule(self.module)
156 else:
157 self.test = self.testLoader.loadTestsFromNames(self.testNames,
/usr/lib/python2.7/unittest/loader.pyc in loadTestsFromModule(self, module, use_load_tests)
63 obj = getattr(module, name)
64 if isinstance(obj, type) and issubclass(obj, case.TestCase):
---> 65 tests.append(self.loadTestsFromTestCase(obj))
66
67 load_tests = getattr(module, 'load_tests', None)
/usr/lib/python2.7/unittest/loader.pyc in loadTestsFromTestCase(self, testCaseClass)
54 if not testCaseNames and hasattr(testCaseClass, 'runTest'):
55 testCaseNames = ['runTest']
---> 56 loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
57 return loaded_suite
58
/home/hackerearth/webapps/django/mycareerstack/apps/functional_tests/sprints/test_sprint_registration.py in __init__(self, *args, **kwargs)
7
8 def __init__(self, *args, **kwargs):
----> 9 super(RegistrationLandingPageTestCase, self).__init__(*args, **kwargs)
10 user = self.get_user()
11 self.single_phase_hack = self.get_single_phase_hackathon()
/home/hackerearth/webapps/django/mycareerstack/apps/functional_tests/sprints/utils.pyc in __init__(self, *args, **kwargs)
6 def __init__(self, *args, **kwargs):
7 super(SprintFunctionalTestCase, self).__init__(
----> 8 *args, **kwargs)
9 user = self.get_user()
10 self.login(user)
/home/hackerearth/webapps/django/mycareerstack/apps/functional_tests/utils.pyc in __init__(self, *args, **kwargs)
26 def __init__(self, *args, **kwargs):
27 super(HEFunctionalTestCase, self).__init__(*args, **kwargs)
---> 28 self.browser = webdriver.Firefox(timeout=10000)
29 self.homepage = 'http://localhost:8000'
30 self.keys = Keys
/usr/local/lib/python2.7/dist-packages/selenium/webdriver/firefox/webdriver.pyc in __init__(self, firefox_profile, firefox_binary, timeout, capabilities, proxy, executable_path, firefox_options, log_path)
150 command_executor=executor,
151 desired_capabilities=capabilities,
--> 152 keep_alive=True)
153
154 # Selenium remote
/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/webdriver.pyc in __init__(self, command_executor, desired_capabilities, browser_profile, proxy, keep_alive, file_detector)
96 warnings.warn("Please use FirefoxOptions to set browser profile",
97 DeprecationWarning)
---> 98 self.start_session(desired_capabilities, browser_profile)
99 self._switch_to = SwitchTo(self)
100 self._mobile = Mobile(self)
/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/webdriver.pyc in start_session(self, capabilities, browser_profile)
186 parameters = {"capabilities": w3c_caps,
187 "desiredCapabilities": capabilities}
--> 188 response = self.execute(Command.NEW_SESSION, parameters)
189 if 'sessionId' not in response:
190 response = response['value']
/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/webdriver.pyc in execute(self, driver_command, params)
252
253 params = self._wrap_value(params)
--> 254 response = self.command_executor.execute(driver_command, params)
255 if response:
256 self.error_handler.check_response(response)
/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/remote_connection.pyc in execute(self, command, params)
462 path = string.Template(command_info[1]).substitute(params)
463 url = '%s%s' % (self._url, path)
--> 464 return self._request(command_info[0], url, body=data)
465
466 def _request(self, method, url, body=None):
/usr/local/lib/python2.7/dist-packages/selenium/webdriver/remote/remote_connection.pyc in _request(self, method, url, body)
486 try:
487 self._conn.request(method, parsed_url.path, body, headers)
--> 488 resp = self._conn.getresponse()
489 except (httplib.HTTPException, socket.error):
490 self._conn.close()
/usr/lib/python2.7/httplib.pyc in getresponse(self, buffering)
1134
1135 try:
-> 1136 response.begin()
1137 assert response.will_close != _UNKNOWN
1138 self.__state = _CS_IDLE
/usr/lib/python2.7/httplib.pyc in begin(self)
451 # read until we get a non-100 response
452 while True:
--> 453 version, status, reason = self._read_status()
454 if status != CONTINUE:
455 break
/usr/lib/python2.7/httplib.pyc in _read_status(self)
407 def _read_status(self):
408 # Initialize with Simple-Response defaults
--> 409 line = self.fp.readline(_MAXLINE + 1)
410 if len(line) > _MAXLINE:
411 raise LineTooLong("header line")
/usr/lib/python2.7/socket.pyc in readline(self, size)
478 while True:
479 try:
--> 480 data = self._sock.recv(self._rbufsize)
481 except error, e:
482 if e.args[0] == EINTR:
timeout: timed out
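One way to gather more information is to point Selenium at a known geckodriver binary and capture its log; the Firefox webdriver signature in the traceback above shows that executable_path and log_path are accepted in Selenium 3.4. A sketch, with a hypothetical driver path:
from selenium import webdriver

# Hypothetical geckodriver location; adjust to wherever geckodriver is installed.
driver = webdriver.Firefox(
    executable_path='/usr/local/bin/geckodriver',
    log_path='/tmp/geckodriver.log',  # inspect this file after a timeout
    timeout=120,
)
driver.get('http://localhost:8000')
driver.quit()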

Unable to execute commands in remote mongo using pymongo

I am able to connect to a database on my remote server (after authentication, of course), but I am not able to execute any commands or even list collections.
mongo_url = "blah.com:12345"
db_name = "db_name"
db_user_name = "user"
db_password = "password"
mongo_uri = "mongodb://" + db_user_name + ":" + db_password + "#" + mongo_url + "/" + db_name
connection = pymongo.MongoClient(mongo_uri)
db = connection[db_name]
print db.authenticate(db_user_name, db_password) // Returns True
However, I am not able to use commands like db.collection_names(), or any command using db.command().
I get this error stack (saying Authentication failed):
---------------------------------------------------------------------------
OperationFailure Traceback (most recent call last)
<ipython-input-13-1840c0979539> in <module>()
----> 1 db.collection_names()
D:\Continuum\Anaconda2\lib\site-packages\pymongo\database.pyc in collection_names(self, include_system_collections)
515 """
516 with self.__client._socket_for_reads(
--> 517 ReadPreference.PRIMARY) as (sock_info, slave_okay):
518
519 wire_version = sock_info.max_wire_version
D:\Continuum\Anaconda2\lib\contextlib.pyc in __enter__(self)
15 def __enter__(self):
16 try:
---> 17 return self.gen.next()
18 except StopIteration:
19 raise RuntimeError("generator didn't yield")
D:\Continuum\Anaconda2\lib\site-packages\pymongo\mongo_client.pyc in _socket_for_reads(self, read_preference)
796 topology = self._get_topology()
797 single = topology.description.topology_type == TOPOLOGY_TYPE.Single
--> 798 with self._get_socket(read_preference) as sock_info:
799 slave_ok = (single and not sock_info.is_mongos) or (
800 preference != ReadPreference.PRIMARY)
D:\Continuum\Anaconda2\lib\contextlib.pyc in __enter__(self)
15 def __enter__(self):
16 try:
---> 17 return self.gen.next()
18 except StopIteration:
19 raise RuntimeError("generator didn't yield")
D:\Continuum\Anaconda2\lib\site-packages\pymongo\mongo_client.pyc in _get_socket(self, selector)
762 server = self._get_topology().select_server(selector)
763 try:
--> 764 with server.get_socket(self.__all_credentials) as sock_info:
765 yield sock_info
766 except NetworkTimeout:
D:\Continuum\Anaconda2\lib\contextlib.pyc in __enter__(self)
15 def __enter__(self):
16 try:
---> 17 return self.gen.next()
18 except StopIteration:
19 raise RuntimeError("generator didn't yield")
D:\Continuum\Anaconda2\lib\site-packages\pymongo\server.pyc in get_socket(self, all_credentials, checkout)
161 #contextlib.contextmanager
162 def get_socket(self, all_credentials, checkout=False):
--> 163 with self.pool.get_socket(all_credentials, checkout) as sock_info:
164 yield sock_info
165
D:\Continuum\Anaconda2\lib\contextlib.pyc in __enter__(self)
15 def __enter__(self):
16 try:
---> 17 return self.gen.next()
18 except StopIteration:
19 raise RuntimeError("generator didn't yield")
D:\Continuum\Anaconda2\lib\site-packages\pymongo\pool.pyc in get_socket(self, all_credentials, checkout)
582 sock_info = self._get_socket_no_auth()
583 try:
--> 584 sock_info.check_auth(all_credentials)
585 yield sock_info
586 except:
D:\Continuum\Anaconda2\lib\site-packages\pymongo\pool.pyc in check_auth(self, all_credentials)
330
331 for credentials in cached - authset:
--> 332 auth.authenticate(credentials, self)
333 self.authset.add(credentials)
334
D:\Continuum\Anaconda2\lib\site-packages\pymongo\auth.pyc in authenticate(credentials, sock_info)
462 mechanism = credentials.mechanism
463 auth_func = _AUTH_MAP.get(mechanism)
--> 464 auth_func(credentials, sock_info)
465
466
D:\Continuum\Anaconda2\lib\site-packages\pymongo\auth.pyc in _authenticate_default(credentials, sock_info)
442 def _authenticate_default(credentials, sock_info):
443 if sock_info.max_wire_version >= 3:
--> 444 return _authenticate_scram_sha1(credentials, sock_info)
445 else:
446 return _authenticate_mongo_cr(credentials, sock_info)
D:\Continuum\Anaconda2\lib\site-packages\pymongo\auth.pyc in _authenticate_scram_sha1(credentials, sock_info)
226 ('conversationId', res['conversationId']),
227 ('payload', Binary(client_final))])
--> 228 res = sock_info.command(source, cmd)
229
230 parsed = _parse_scram_response(res['payload'])
D:\Continuum\Anaconda2\lib\site-packages\pymongo\pool.pyc in command(self, dbname, spec, slave_ok, read_preference, codec_options, check, allowable_errors, check_keys, read_concern)
237 check, allowable_errors, self.address,
238 check_keys, self.listeners, self.max_bson_size,
--> 239 read_concern)
240 except OperationFailure:
241 raise
D:\Continuum\Anaconda2\lib\site-packages\pymongo\network.pyc in command(sock, dbname, spec, slave_ok, is_mongos, read_preference, codec_options, check, allowable_errors, address, check_keys, listeners, max_bson_size, read_concern)
100 response_doc = unpacked['data'][0]
101 if check:
--> 102 helpers._check_command_response(response_doc, None, allowable_errors)
103 except Exception as exc:
104 if publish:
D:\Continuum\Anaconda2\lib\site-packages\pymongo\helpers.pyc in _check_command_response(response, msg, allowable_errors)
203
204 msg = msg or "%s"
--> 205 raise OperationFailure(msg % errmsg, code, response)
206
207
OperationFailure: Authentication failed.
But I am able to do these operations from my mongo shell.
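For comparison, a minimal sketch of connecting with the credentials in the URI and an explicit authSource (which database the user was created in is an assumption here):
import pymongo

# Assumption: the user is defined in db_name; if it was created in the
# "admin" database, use authSource=admin instead.
mongo_uri = "mongodb://user:password@blah.com:12345/db_name?authSource=db_name"
connection = pymongo.MongoClient(mongo_uri)
db = connection["db_name"]
print(db.collection_names())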
