Related
I am getting the following error while connecting to Snowflake via Python using snowflake.connector.connect:
import os
import snowflake.connector  # pip install snowflake-connector-python

# Credentials are read from a local .env file via environment variables
cnx = snowflake.connector.connect(user=os.getenv('USER'), password=os.getenv('PASSWORD'),
                                  account=os.getenv('ACCOUNT'), warehouse=os.getenv('WAREHOUSE'),
                                  database=db, schema=schema)
This was working fine until today, when my system crashed while running a Python script and I had to hard reboot. I have tried many things, like deleting Python, Anaconda, and all their related files in the Users folder and reinstalling a fresh Anaconda version, but I still get the same error. The complete error message is below. Help appreciated.
I even tried hardcoding the username, password, and the rest as variables, but I still get the same error, so it has nothing to do with the .env file.
---------------------------------------------------------------------------
UnpicklingError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_18464\4059644960.py in <module>
23 db='DB_SANDBOX'
24 schema='PUBLIC'
---> 25 cnx = snowflake.connector.connect(user=os.getenv('USER'),password=os.getenv('PASSWORD'),account=os.getenv('ACCOUNT'),warehouse=os.getenv('WAREHOUSE'), database=db,schema=schema )
26
27 query ='''
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\__init__.py in Connect(**kwargs)
49
50 def Connect(**kwargs) -> SnowflakeConnection:
---> 51 return SnowflakeConnection(**kwargs)
52
53
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\connection.py in __init__(self, **kwargs)
295 self.converter = None
296 self.__set_error_attributes()
--> 297 self.connect(**kwargs)
298 self._telemetry = TelemetryClient(self._rest)
299
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\connection.py in connect(self, **kwargs)
548 connection_diag.generate_report()
549 else:
--> 550 self.__open_connection()
551
552 def close(self, retry=True):
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\connection.py in __open_connection(self)
787 auth = Auth(self.rest)
788 auth.read_temporary_credentials(self.host, self.user, self._session_parameters)
--> 789 self._authenticate(auth_instance)
790
791 self._password = None # ensure password won't persist
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\connection.py in _authenticate(self, auth_instance)
1050 # make some changes if needed before real __authenticate
1051 try:
-> 1052 self.__authenticate(self.__preprocess_auth_instance(auth_instance))
1053 except ReauthenticationRequest as ex:
1054 # cached id_token expiration error, we have cleaned id_token and try to authenticate again
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\connection.py in __authenticate(self, auth_instance)
1070 auth = Auth(self.rest)
1071 try:
-> 1072 auth.authenticate(
1073 auth_instance=auth_instance,
1074 account=self.account,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\auth.py in authenticate(self, auth_instance, account, user, database, schema, warehouse, role, passcode, passcode_in_password, mfa_callback, password_callback, session_parameters, timeout)
255
256 try:
--> 257 ret = self._rest._post_request(
258 url,
259 headers,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\network.py in _post_request(self, url, headers, body, token, timeout, _no_results, no_retry, socket_timeout, _include_retry_params)
702 pprint(ret)
703
--> 704 ret = self.fetch(
705 "post",
706 full_url,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\network.py in fetch(self, method, full_url, headers, data, timeout, **kwargs)
792 retry_ctx = RetryCtx(timeout, include_retry_params)
793 while True:
--> 794 ret = self._request_exec_wrapper(
795 session, method, full_url, headers, data, retry_ctx, **kwargs
796 )
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\network.py in _request_exec_wrapper(self, session, method, full_url, headers, data, retry_ctx, no_retry, token, **kwargs)
915 except Exception as e:
916 if not no_retry:
--> 917 raise e
918 logger.debug("Ignored error", exc_info=True)
919 return {}
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\network.py in _request_exec_wrapper(self, session, method, full_url, headers, data, retry_ctx, no_retry, token, **kwargs)
835 full_url = SnowflakeRestful.add_request_guid(full_url)
836 try:
--> 837 return_object = self._request_exec(
838 session=session,
839 method=method,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\network.py in _request_exec(self, session, method, full_url, headers, data, token, catch_okta_unauthorized_error, is_raw_text, is_raw_binary, binary_data_handler, socket_timeout)
1114 stack_trace=traceback.format_exc(),
1115 )
-> 1116 raise err
1117
1118 def make_requests_session(self):
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\network.py in _request_exec(self, session, method, full_url, headers, data, token, catch_okta_unauthorized_error, is_raw_text, is_raw_binary, binary_data_handler, socket_timeout)
1016 # the response within the time. If not, ConnectReadTimeout or
1017 # ReadTimeout is raised.
-> 1018 raw_ret = session.request(
1019 method=method,
1020 url=full_url,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\vendored\requests\sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
585 }
586 send_kwargs.update(settings)
--> 587 resp = self.send(prep, **send_kwargs)
588
589 return resp
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\vendored\requests\sessions.py in send(self, request, **kwargs)
699
700 # Send the request
--> 701 r = adapter.send(request, **kwargs)
702
703 # Total elapsed time of the request (approximately)
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\vendored\requests\adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
487 try:
488 if not chunked:
--> 489 resp = conn.urlopen(
490 method=request.method,
491 url=url,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\vendored\urllib3\connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
701
702 # Make the request on the httplib connection object.
--> 703 httplib_response = self._make_request(
704 conn,
705 method,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\vendored\urllib3\connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
384 # Trigger any extra validation we need to do.
385 try:
--> 386 self._validate_conn(conn)
387 except (SocketTimeout, BaseSSLError) as e:
388 # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\vendored\urllib3\connectionpool.py in _validate_conn(self, conn)
1040 # Force connect early to allow us to validate the connection.
1041 if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
-> 1042 conn.connect()
1043
1044 if not conn.is_verified:
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\vendored\urllib3\connection.py in connect(self)
412 context.load_default_certs()
413
--> 414 self.sock = ssl_wrap_socket(
415 sock=conn,
416 keyfile=self.key_file,
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\ssl_wrap_socket.py in ssl_wrap_socket_with_ocsp(*args, **kwargs)
76 ret = ssl_.ssl_wrap_socket(*args, **kwargs)
77
---> 78 from .ocsp_asn1crypto import SnowflakeOCSPAsn1Crypto as SFOCSP
79
80 log.debug(
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\ocsp_asn1crypto.py in <module>
45 )
46 from snowflake.connector.errors import RevocationCheckError
---> 47 from snowflake.connector.ocsp_snowflake import SnowflakeOCSP, generate_cache_key
48
49 with warnings.catch_warnings():
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\ocsp_snowflake.py in <module>
79 tuple[bytes, bytes, bytes],
80 OCSPResponseValidationResult,
---> 81 ] = SFDictFileCache(
82 entry_lifetime=constants.DAY_IN_SECONDS,
83 file_path={
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\cache.py in __init__(self, file_path, entry_lifetime, file_timeout)
404 self.last_loaded: datetime.datetime | None = None
405 if os.path.exists(self.file_path):
--> 406 self._load()
407
408 def _getitem_non_locking(
~\.conda\envs\py_38_env\lib\site-packages\snowflake\connector\cache.py in _load(self)
485 try:
486 with open(self.file_path, "rb") as r_file:
--> 487 other = pickle.load(r_file)
488 self._update(
489 other,
UnpicklingError: invalid load key, '\x00'.
This is probably a corrupted cache, which you should try deleting. The default cache directories are documented here. On Windows the default location is
%USERPROFILE%\AppData\Local\Snowflake\Caches
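If you want to script the cleanup, here is a minimal sketch (assuming the default Windows location shown above; the connector should rebuild its OCSP cache on the next connection, and the exact directory can differ by platform and connector version):
import os
import shutil

# Remove the Snowflake cache directory so the corrupted pickle file is discarded
cache_dir = os.path.expandvars(r"%USERPROFILE%\AppData\Local\Snowflake\Caches")
if os.path.isdir(cache_dir):
    shutil.rmtree(cache_dir)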
I have installed trino-python-client in a custom JupyterLab image and it works if I use basic authentication.
I want to use the OAuth services I have set up in Trino to make things simpler for users.
This also works fine in ipython3 (see below), but it doesn't work in JupyterLab.
JupyterLab hides the redirect URL; it never prints the redirection URL.
In ipython3 I see this:
In [1]: import trino
...: conn = trino.dbapi.connect(
...: host='trino.somedomain.net',
...: port=443,
...: user='first.last',
...: catalog='iceberg',
...: schema='ds_scratch',
...: http_scheme='https',
...: auth=trino.auth.OAuth2Authentication(),
...: )
...: cur = conn.cursor()
In [2]: cur.execute('SELECT * FROM system.runtime.nodes')
Open the following URL in the browser for the external authentication:
https://trino.somedomain.net/oauth2/token/initiate/042f6e4167d4e6a3f70068ec4389037c2b9c34f3ec356ddc5522a3e13e179fd9
In [3]: rows = cur.fetchall()
In [4]: print(rows)
[['trino-coordinator-69dffc6f9f-pvpg4',...]]
I can see the redirection URL, complete the OAuth flow, and everything works.
But in JupyterLab it never prints the URL. How do I change this?
In JupyterLab I see the following.
In [1]: import trino
In [2]: conn = trino.dbapi.connect(
host='trino.somedomain.net',
port=443,
user='first.last',
catalog='iceberg',
schema='ds_scratch',
http_scheme='https',
auth=trino.auth.OAuth2Authentication(),
)
In [3]: cur = conn.cursor()
In [4]: cur.execute('show tables from iceberg.ds_scratch')
---------------------------------------------------------------------------
KeyboardInterrupt Traceback (most recent call last)
/tmp/ipykernel_170/588296735.py in <module>
----> 1 cur.execute('show tables from iceberg.ds_scratch')
/opt/conda/lib/python3.9/site-packages/trino/dbapi.py in execute(self, operation, params)
434 else:
435 self._query = trino.client.TrinoQuery(self._request, sql=operation)
--> 436 result = self._query.execute()
437 self._iterator = iter(result)
438 return result
/opt/conda/lib/python3.9/site-packages/trino/client.py in execute(self, additional_http_headers)
523 raise exceptions.TrinoUserError("Query has been cancelled", self.query_id)
524
--> 525 response = self._request.post(self._sql, additional_http_headers)
526 status = self._request.process(response)
527 self._info_uri = status.info_uri
/opt/conda/lib/python3.9/site-packages/trino/client.py in post(self, sql, additional_http_headers)
337 http_headers.update(additional_http_headers or {})
338
--> 339 http_response = self._post(
340 self.statement_url,
341 data=data,
/opt/conda/lib/python3.9/site-packages/trino/exceptions.py in decorated(*args, **kwargs)
121 for attempt in range(1, max_attempts + 1):
122 try:
--> 123 result = func(*args, **kwargs)
124 if any(guard(result) for guard in conditions):
125 handle_retry.retry(func, args, kwargs, None, attempt)
/opt/conda/lib/python3.9/site-packages/requests/sessions.py in post(self, url, data, json, **kwargs)
588 """
589
--> 590 return self.request('POST', url, data=data, json=json, **kwargs)
591
592 def put(self, url, data=None, **kwargs):
/opt/conda/lib/python3.9/site-packages/requests/sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
540 }
541 send_kwargs.update(settings)
--> 542 resp = self.send(prep, **send_kwargs)
543
544 return resp
/opt/conda/lib/python3.9/site-packages/requests/sessions.py in send(self, request, **kwargs)
653
654 # Send the request
--> 655 r = adapter.send(request, **kwargs)
656
657 # Total elapsed time of the request (approximately)
/opt/conda/lib/python3.9/site-packages/requests/adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
437 try:
438 if not chunked:
--> 439 resp = conn.urlopen(
440 method=request.method,
441 url=url,
/opt/conda/lib/python3.9/site-packages/urllib3/connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
668
669 # Make the request on the httplib connection object.
--> 670 httplib_response = self._make_request(
671 conn,
672 method,
/opt/conda/lib/python3.9/site-packages/urllib3/connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
379 # Trigger any extra validation we need to do.
380 try:
--> 381 self._validate_conn(conn)
382 except (SocketTimeout, BaseSSLError) as e:
383 # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
/opt/conda/lib/python3.9/site-packages/urllib3/connectionpool.py in _validate_conn(self, conn)
976 # Force connect early to allow us to validate the connection.
977 if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
--> 978 conn.connect()
979
980 if not conn.is_verified:
/opt/conda/lib/python3.9/site-packages/urllib3/connection.py in connect(self)
307 def connect(self):
308 # Add certificate verification
--> 309 conn = self._new_conn()
310 hostname = self.host
311
/opt/conda/lib/python3.9/site-packages/urllib3/connection.py in _new_conn(self)
157
158 try:
--> 159 conn = connection.create_connection(
160 (self._dns_host, self.port), self.timeout, **extra_kw
161 )
/opt/conda/lib/python3.9/site-packages/urllib3/util/connection.py in create_connection(address, timeout, source_address, socket_options)
72 if source_address:
73 sock.bind(source_address)
---> 74 sock.connect(sa)
75 return sock
76
KeyboardInterrupt:
In [5]: rows = cur.fetchall()
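One thing that may help (a sketch only, assuming your trino-python-client version exposes a redirect_auth_url_handler parameter on OAuth2Authentication and calls the handler with the redirect URL; the handler name below is made up for illustration): pass a handler that renders the URL inside the notebook output instead of relying on the default console print, which the JupyterLab kernel may be swallowing.
import trino
from IPython.display import HTML, display

def notebook_redirect_handler(url):
    # Assumed handler signature: called with the OAuth redirect URL.
    # Show it as a clickable link in the notebook output.
    display(HTML(f'<a href="{url}" target="_blank">Open this URL to authenticate</a>'))

conn = trino.dbapi.connect(
    host='trino.somedomain.net',
    port=443,
    user='first.last',
    catalog='iceberg',
    schema='ds_scratch',
    http_scheme='https',
    auth=trino.auth.OAuth2Authentication(redirect_auth_url_handler=notebook_redirect_handler),
)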
I'm using Python to make requests to the smashgg API (the queries are in GraphQL) and I got an error I had never seen before while running the code below:
headers = {"Authorization": "Bearer my token"}
def run_query(query, variables): # A simple function to use requests.post to make the API call. Note the json= section.
request = requests.post('https://api.smash.gg/gql/alpha', json={'query': query, 'variables': variables}, headers=headers)
if request.status_code == 200:
return request.json()
else:
raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, query))
nbRequestsPerMinute = 75
dataFrameAllSets = pd.read_csv('dataFrameAllSets.csv') #sets of players I already fetched
countryByPlayer = pd.DataFrame(columns = [
"playerId",
"playerCountry"
])
uniqueValues1 = dataFrameAllSets["setWinnerId"].unique()
uniqueValues2 = dataFrameAllSets["setLoserId"].unique()
concate = np.concatenate((uniqueValues1, uniqueValues2))
countryByPlayer["playerId"] = concate
countryByPlayer.drop_duplicates(subset = "playerId", inplace = True)
#countryByPlayer has more than 200 000 rows
def queryCountry(playerId) :
query = """
query player ($playerId: ID!){
player(id: $playerId){
user{
location {
country
}
}
}
}
"""
variables = {
"playerId": playerId
}
return query, variables
for index, x in countryByPlayer.iterrows() :
t0 = time.time()
query, variables = queryCountry(int(x['playerId']))
result = run_query(query, variables) # Execute the query
if result["data"]["player"] != None :
if result["data"]["player"]["user"] != None :
if result["data"]["player"]["user"]["location"] != None :
countryByPlayer.at[index, "playerCountry"] = result["data"]["player"]["user"]["location"]["country"]
else :
countryByPlayer.at[index, "playerCountry"] = "noneLocation"
else :
countryByPlayer.at[index, "playerCountry"] = "nonePlayer"
t1 = time.time()
if t1 - t0 < 60/nbRequestsPerMinute :
time.sleep(60/nbRequestsPerMinute - t1 + t0)
The error message is the following:
RemoteDisconnected Traceback (most recent call last)
~\Anaconda3\lib\site-packages\urllib3\connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
599 body=body, headers=headers,
--> 600 chunked=chunked)
601
~\Anaconda3\lib\site-packages\urllib3\connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
383 # otherwise it looks like a programming error was the cause.
--> 384 six.raise_from(e, None)
385 except (SocketTimeout, BaseSSLError, SocketError) as e:
~\Anaconda3\lib\site-packages\urllib3\packages\six.py in raise_from(value, from_value)
~\Anaconda3\lib\site-packages\urllib3\connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
379 try:
--> 380 httplib_response = conn.getresponse()
381 except Exception as e:
~\Anaconda3\lib\http\client.py in getresponse(self)
1320 try:
-> 1321 response.begin()
1322 except ConnectionError:
~\Anaconda3\lib\http\client.py in begin(self)
295 while True:
--> 296 version, status, reason = self._read_status()
297 if status != CONTINUE:
~\Anaconda3\lib\http\client.py in _read_status(self)
264 # sending a valid response.
--> 265 raise RemoteDisconnected("Remote end closed connection without"
266 " response")
RemoteDisconnected: Remote end closed connection without response
During handling of the above exception, another exception occurred:
ProtocolError Traceback (most recent call last)
~\Anaconda3\lib\site-packages\requests\adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
448 retries=self.max_retries,
--> 449 timeout=timeout
450 )
~\Anaconda3\lib\site-packages\urllib3\connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
637 retries = retries.increment(method, url, error=e, _pool=self,
--> 638 _stacktrace=sys.exc_info()[2])
639 retries.sleep()
~\Anaconda3\lib\site-packages\urllib3\util\retry.py in increment(self, method, url, response, error, _pool, _stacktrace)
366 if read is False or not self._is_method_retryable(method):
--> 367 raise six.reraise(type(error), error, _stacktrace)
368 elif read is not None:
~\Anaconda3\lib\site-packages\urllib3\packages\six.py in reraise(tp, value, tb)
684 if value.__traceback__ is not tb:
--> 685 raise value.with_traceback(tb)
686 raise value
~\Anaconda3\lib\site-packages\urllib3\connectionpool.py in urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
599 body=body, headers=headers,
--> 600 chunked=chunked)
601
~\Anaconda3\lib\site-packages\urllib3\connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
383 # otherwise it looks like a programming error was the cause.
--> 384 six.raise_from(e, None)
385 except (SocketTimeout, BaseSSLError, SocketError) as e:
~\Anaconda3\lib\site-packages\urllib3\packages\six.py in raise_from(value, from_value)
~\Anaconda3\lib\site-packages\urllib3\connectionpool.py in _make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
379 try:
--> 380 httplib_response = conn.getresponse()
381 except Exception as e:
~\Anaconda3\lib\http\client.py in getresponse(self)
1320 try:
-> 1321 response.begin()
1322 except ConnectionError:
~\Anaconda3\lib\http\client.py in begin(self)
295 while True:
--> 296 version, status, reason = self._read_status()
297 if status != CONTINUE:
~\Anaconda3\lib\http\client.py in _read_status(self)
264 # sending a valid response.
--> 265 raise RemoteDisconnected("Remote end closed connection without"
266 " response")
ProtocolError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))
During handling of the above exception, another exception occurred:
ConnectionError Traceback (most recent call last)
<ipython-input-5-f3b194f946df> in <module>
4 compteurPlayers += 1
5 query, variables = queryCountry(int(x['playerId']))
----> 6 result = run_query(query, variables) # Execute the query
7 if result["data"]["player"] != None :
8 if result["data"]["player"]["user"] != None :
<ipython-input-2-2a5c0920ef76> in run_query(query, variables)
3
4 def run_query(query, variables): # A simple function to use requests.post to make the API call. Note the json= section.
----> 5 request = requests.post('https://api.smash.gg/gql/alpha', json={'query': query, 'variables': variables}, headers=headers)
6 if request.status_code == 200:
7 return request.json()
~\Anaconda3\lib\site-packages\requests\api.py in post(url, data, json, **kwargs)
114 """
115
--> 116 return request('post', url, data=data, json=json, **kwargs)
117
118
~\Anaconda3\lib\site-packages\requests\api.py in request(method, url, **kwargs)
58 # cases, and look like a memory leak in others.
59 with sessions.Session() as session:
---> 60 return session.request(method=method, url=url, **kwargs)
61
62
~\Anaconda3\lib\site-packages\requests\sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
531 }
532 send_kwargs.update(settings)
--> 533 resp = self.send(prep, **send_kwargs)
534
535 return resp
~\Anaconda3\lib\site-packages\requests\sessions.py in send(self, request, **kwargs)
644
645 # Send the request
--> 646 r = adapter.send(request, **kwargs)
647
648 # Total elapsed time of the request (approximately)
~\Anaconda3\lib\site-packages\requests\adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
496
497 except (ProtocolError, socket.error) as err:
--> 498 raise ConnectionError(err, request=request)
499
500 except MaxRetryError as e:
ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))
I first thought it had something to do with the rate limit of the API, which is set to 80 requests per minute; however, I delay the loop to always stay under this rate limit.
Moreover, when retesting my code multiple times, the error appeared randomly during the loop.
So I come to you guys because I really need your help.
Thanks in advance.
Problem
A remote service is unreliable.
Solution
Program defensively by handling anticipated errors in your code. Consider implementing exponential backoff with a maximum number of retries, as in the sketch below. Also, add logging to track which requests succeeded, were retried, or failed completely. If this process is critical to your application, you may also want to implement application monitoring or a paging system that alerts you when a certain condition is met (e.g., 100 errors in a row).
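A minimal sketch of the retry-with-backoff idea, reusing the run_query(query, variables) helper from the question (the retry count, delay values, and logging setup here are illustrative choices, not a prescribed implementation):
import logging
import time

import requests

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def run_query_with_retries(query, variables, max_attempts=5, base_delay=2.0):
    # Retry the API call with exponential backoff on connection-level failures
    for attempt in range(1, max_attempts + 1):
        try:
            return run_query(query, variables)
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as err:
            wait = base_delay * (2 ** (attempt - 1))  # 2s, 4s, 8s, ...
            logger.warning("Attempt %d/%d failed (%s); retrying in %.0fs", attempt, max_attempts, err, wait)
            time.sleep(wait)
    logger.error("Giving up after %d attempts for variables %s", max_attempts, variables)
    return None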
Also, the service may have a bulk API that you can use, so instead of submitting n requests (where n is the number of player ids) you can submit n / bulk_limit requests (where bulk_limit is the maximum number of ids their bulk API accepts in a single request).
I have a few lines of code that used to work very well until today (using gspread). I get a certificate verification error whenever I run this code:
import json

import gspread
from oauth2client.client import SignedJwtAssertionCredentials

json_key = json.load(open('Sheetmodification_abc.json'))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'].encode(), scope)
gc = gspread.authorize(credentials)
wks = gc.open("googlespreadsheets").sheet1
What could be the reason for this and how do I fix it? I've been looking here for a solution: https://github.com/burnash/gspread/issues/223
But I don't understand the solution offered.
Here's the traceback:
---------------------------------------------------------------------------
SSLError Traceback (most recent call last)
C:\Users\Nathan\App_Finder_2.0.1.py in <module>()
142 gc = gspread.authorize(credentials)
143
--> 144 wks = gc.open("googlespreadsheets").sheet1
145
146 # 1. Retrieve app name
C:\Users\Nathan\AppData\Local\Enthought\Canopy\User\lib\site-packages\gspread\client.pyc in open(self, title)
143
144 """
--> 145 feed = self.get_spreadsheets_feed()
146
147 for elem in feed.findall(_ns('entry')):
C:\Users\Nathan\AppData\Local\Enthought\Canopy\User\lib\site-packages\gspread\client.pyc in get_spreadsheets_feed(self, visibility, projection)
229 visibility=visibility, projection=projection)
230
--> 231 r = self.session.get(url)
232 return ElementTree.fromstring(r.content)
233
C:\Users\Nathan\AppData\Local\Enthought\Canopy\User\lib\site-packages\gspread\httpsession.pyc in get(self, url, **kwargs)
73
74 def get(self, url, **kwargs):
---> 75 return self.request('GET', url, **kwargs)
76
77 def delete(self, url, **kwargs):
C:\Users\Nathan\AppData\Local\Enthought\Canopy\User\lib\site-packages\gspread\httpsession.pyc in request(self, method, url, data, headers)
65 except AttributeError:
66 raise Exception("HTTP method '{}' is not supported".format(method))
---> 67 response = func(url, data=data, headers=request_headers)
68
69 if response.status_code > 399:
C:\Users\Nathan\AppData\Local\Enthought\Canopy\User\lib\site-packages\requests\api.pyc in get(url, params, **kwargs)
67
68 kwargs.setdefault('allow_redirects', True)
---> 69 return request('get', url, params=params, **kwargs)
70
71
C:\Users\Nathan\AppData\Local\Enthought\Canopy\User\lib\site-packages\requests\api.pyc in request(method, url, **kwargs)
48
49 session = sessions.Session()
---> 50 response = session.request(method=method, url=url, **kwargs)
51 # By explicitly closing the session, we avoid leaving sockets open which
52 # can trigger a ResourceWarning in some cases, and look like a memory leak
C:\Users\Nathan\AppData\Local\Enthought\Canopy\User\lib\site-packages\requests\sessions.pyc in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
463 }
464 send_kwargs.update(settings)
--> 465 resp = self.send(prep, **send_kwargs)
466
467 return resp
C:\Users\Nathan\AppData\Local\Enthought\Canopy\User\lib\site-packages\requests\sessions.pyc in send(self, request, **kwargs)
571
572 # Send the request
--> 573 r = adapter.send(request, **kwargs)
574
575 # Total elapsed time of the request (approximately)
C:\Users\Nathan\AppData\Local\Enthought\Canopy\User\lib\site-packages\requests\adapters.pyc in send(self, request, stream, timeout, verify, cert, proxies)
429 except (_SSLError, _HTTPError) as e:
430 if isinstance(e, _SSLError):
--> 431 raise SSLError(e, request=request)
432 elif isinstance(e, ReadTimeoutError):
433 raise ReadTimeout(e, request=request)
SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:581)
First of all, I should mention that I am using Python 2.7 on Windows. I found that if I run a script very similar to yours without "Administrator Privileges", Python throws an SSLError exception (in my case, ssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:590)). Running the exact same script with "Administrator Privileges" (CMD, "Run as Administrator") doesn't raise any exception.
It is possible that this could be related to Windows Firewall.