I'm trapping exceptions like this:
with httpx.Client(**sessions[scraperIndex]) as client:
    try:
        response = client.get(...)
    except TimeoutError as e:
        print('does not hit')
    except Exception as e:
        print(f'⛔️ Unexpected exception: {e}')
        print_exc()  # hits!
However, I'm getting the crash dump below.
Pulling out the key lines:
TimeoutError: The read operation timed out
During handling of the above exception, another exception occurred:
httpcore.ReadTimeout: The read operation timed out
The above exception was the direct cause of the following exception:
httpx.ReadTimeout: The read operation timed out
Why isn't my TimeoutError handler catching this?
And what is the correct exception to catch? Is there a logic for deducing it?
Crash dump:
⛔️ Unexpected exception: The read operation timed out
Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/httpcore/_exceptions.py", line 8, in map_exceptions
yield
File "/usr/local/lib/python3.10/dist-packages/httpcore/backends/sync.py", line 26, in read
return self._sock.recv(max_bytes)
File "/usr/lib/python3.10/ssl.py", line 1258, in recv
return self.read(buflen)
File "/usr/lib/python3.10/ssl.py", line 1131, in read
return self._sslobj.read(len)
TimeoutError: The read operation timed out
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/httpx/_transports/default.py", line 60, in map_httpcore_exceptions
yield
File "/usr/local/lib/python3.10/dist-packages/httpx/_transports/default.py", line 218, in handle_request
resp = self._pool.handle_request(req)
File "/usr/local/lib/python3.10/dist-packages/httpcore/_sync/connection_pool.py", line 253, in handle_request
raise exc
File "/usr/local/lib/python3.10/dist-packages/httpcore/_sync/connection_pool.py", line 237, in handle_request
response = connection.handle_request(request)
File "/usr/local/lib/python3.10/dist-packages/httpcore/_sync/connection.py", line 90, in handle_request
return self._connection.handle_request(request)
File "/usr/local/lib/python3.10/dist-packages/httpcore/_sync/http11.py", line 105, in handle_request
raise exc
File "/usr/local/lib/python3.10/dist-packages/httpcore/_sync/http11.py", line 84, in handle_request
) = self._receive_response_headers(**kwargs)
File "/usr/local/lib/python3.10/dist-packages/httpcore/_sync/http11.py", line 148, in _receive_response_headers
event = self._receive_event(timeout=timeout)
File "/usr/local/lib/python3.10/dist-packages/httpcore/_sync/http11.py", line 177, in _receive_event
data = self._network_stream.read(
File "/usr/local/lib/python3.10/dist-packages/httpcore/backends/sync.py", line 24, in read
with map_exceptions(exc_map):
File "/usr/lib/python3.10/contextlib.py", line 153, in __exit__
self.gen.throw(typ, value, traceback)
File "/usr/local/lib/python3.10/dist-packages/httpcore/_exceptions.py", line 12, in map_exceptions
raise to_exc(exc)
httpcore.ReadTimeout: The read operation timed out
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/root/scraper-pi/Scrape.py", line 148, in main
cursor, _nScraped = scrape(client, cursor)
File "/root/scraper-pi/Scrape.py", line 79, in scrape
response = client.get(
File "/usr/local/lib/python3.10/dist-packages/httpx/_client.py", line 1039, in get
return self.request(
File "/usr/local/lib/python3.10/dist-packages/httpx/_client.py", line 815, in request
return self.send(request, auth=auth, follow_redirects=follow_redirects)
File "/usr/local/lib/python3.10/dist-packages/httpx/_client.py", line 902, in send
response = self._send_handling_auth(
File "/usr/local/lib/python3.10/dist-packages/httpx/_client.py", line 930, in _send_handling_auth
response = self._send_handling_redirects(
File "/usr/local/lib/python3.10/dist-packages/httpx/_client.py", line 967, in _send_handling_redirects
response = self._send_single_request(request)
File "/usr/local/lib/python3.10/dist-packages/httpx/_client.py", line 1003, in _send_single_request
response = transport.handle_request(request)
File "/usr/local/lib/python3.10/dist-packages/httpx/_transports/default.py", line 217, in handle_request
with map_httpcore_exceptions():
File "/usr/lib/python3.10/contextlib.py", line 153, in __exit__
self.gen.throw(typ, value, traceback)
File "/usr/local/lib/python3.10/dist-packages/httpx/_transports/default.py", line 77, in map_httpcore_exceptions
raise mapped_exc(message) from exc
httpx.ReadTimeout: The read operation timed out
The base class for all httpx timeout errors is not the built-in TimeoutError (presumably because that would also make timeouts OSErrors, which doesn't sound correct), but httpx.TimeoutException.
import httpx

with httpx.Client() as client:
    try:
        response = client.get("http://httpbin.org/get", timeout=0.001)
    except httpx.TimeoutException as e:
        print('gottem')
prints gottem just fine.
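As for a logic for deducing the right catch: take the concrete exception named at the bottom of the traceback (httpx.ReadTimeout here) and inspect its ancestry; any base class the library exports will do. A minimal sketch, not part of the original answer:

import httpx

# Walk the ancestry of the concrete exception named in the traceback.
for cls in httpx.ReadTimeout.__mro__:
    print(cls)

# httpx.TimeoutException is among the ancestors, so catching it also covers
# httpx.ConnectTimeout, httpx.WriteTimeout and httpx.PoolTimeout.
print(issubclass(httpx.ReadTimeout, httpx.TimeoutException))  # True

Catching httpx.HTTPError (which httpx.TimeoutException derives from) would also work, but it is much broader than a timeout check.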
Related
I am trying to install Saleor on Linux Mint following the instructions at
https://docs.saleor.io/docs/3.0/developer/installation
When executing the command
docker-compose run --rm api python3 manage.py migrate
I get an error. Here is the stack trace. How do I resolve this issue?
$docker-compose run --rm api python3 manage.py migrate
Starting saleor-platform_db_1 ...
Starting saleor-platform_jaeger_1 ... done
Starting saleor-platform_redis_1 ... done
ERROR: for saleor-platform_db_1 a bytes-like object is required, not 'str'
ERROR: for db a bytes-like object is required, not 'str'
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/docker/api/client.py", line 261, in _raise_for_status
response.raise_for_status()
File "/usr/lib/python3/dist-packages/requests/models.py", line 940, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: http+docker://localhost/v1.22/containers/c015b9d2a6e0ba06c8cc393147db2a4eb1a0fc72d1ae2805e177b409bb8212db/start
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/compose/service.py", line 625, in start_container
container.start()
File "/usr/lib/python3/dist-packages/compose/container.py", line 241, in start
return self.client.start(self.id, **options)
File "/usr/lib/python3/dist-packages/docker/utils/decorators.py", line 19, in wrapped
return f(self, resource_id, *args, **kwargs)
File "/usr/lib/python3/dist-packages/docker/api/container.py", line 1095, in start
self._raise_for_status(res)
File "/usr/lib/python3/dist-packages/docker/api/client.py", line 263, in _raise_for_status
raise create_api_error_from_http_exception(e)
File "/usr/lib/python3/dist-packages/docker/errors.py", line 31, in create_api_error_from_http_exception
raise cls(e, response=response, explanation=explanation)
docker.errors.APIError: 500 Server Error: Internal Server Error ("b'driver failed programming external connectivity on endpoint saleor-platform_db_1 (1b57cb27e18e4e18fad1fde3f6bebb573260974514be140c7e4e0d74d663b7b0): Error starting userland proxy: listen tcp4 0.0.0.0:5432: bind: address already in use'")
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 11, in <module>
load_entry_point('docker-compose==1.25.0', 'console_scripts', 'docker-compose')()
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 72, in main
command()
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 128, in perform_command
handler(command, command_options)
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 896, in run
run_one_off_container(
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 1343, in run_one_off_container
project.up(
File "/usr/lib/python3/dist-packages/compose/project.py", line 565, in up
results, errors = parallel.parallel_execute(
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 112, in parallel_execute
raise error_to_reraise
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 210, in producer
result = func(obj)
File "/usr/lib/python3/dist-packages/compose/project.py", line 548, in do
return service.execute_convergence_plan(
File "/usr/lib/python3/dist-packages/compose/service.py", line 567, in execute_convergence_plan
return self._execute_convergence_start(
File "/usr/lib/python3/dist-packages/compose/service.py", line 506, in _execute_convergence_start
_, errors = parallel_execute(
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 112, in parallel_execute
raise error_to_reraise
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 210, in producer
result = func(obj)
File "/usr/lib/python3/dist-packages/compose/service.py", line 508, in <lambda>
lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
File "/usr/lib/python3/dist-packages/compose/service.py", line 620, in start_container_if_stopped
return self.start_container(container)
File "/usr/lib/python3/dist-packages/compose/service.py", line 627, in start_container
if "driver failed programming external connectivity" in ex.explanation:
TypeError: a bytes-like object is required, not 'str'
Error in sys.excepthook:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/apport_python_hook.py", line 153, in apport_excepthook
with os.fdopen(os.open(pr_filename,
FileNotFoundError: [Errno 2] No such file or directory: '/var/crash/_usr_bin_docker-compose.1000.crash'
Original exception was:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/docker/api/client.py", line 261, in _raise_for_status
response.raise_for_status()
File "/usr/lib/python3/dist-packages/requests/models.py", line 940, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: http+docker://localhost/v1.22/containers/c015b9d2a6e0ba06c8cc393147db2a4eb1a0fc72d1ae2805e177b409bb8212db/start
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/compose/service.py", line 625, in start_container
container.start()
File "/usr/lib/python3/dist-packages/compose/container.py", line 241, in start
return self.client.start(self.id, **options)
File "/usr/lib/python3/dist-packages/docker/utils/decorators.py", line 19, in wrapped
return f(self, resource_id, *args, **kwargs)
File "/usr/lib/python3/dist-packages/docker/api/container.py", line 1095, in start
self._raise_for_status(res)
File "/usr/lib/python3/dist-packages/docker/api/client.py", line 263, in _raise_for_status
raise create_api_error_from_http_exception(e)
File "/usr/lib/python3/dist-packages/docker/errors.py", line 31, in create_api_error_from_http_exception
raise cls(e, response=response, explanation=explanation)
docker.errors.APIError: 500 Server Error: Internal Server Error ("b'driver failed programming external connectivity on endpoint saleor-platform_db_1 (1b57cb27e18e4e18fad1fde3f6bebb573260974514be140c7e4e0d74d663b7b0): Error starting userland proxy: listen tcp4 0.0.0.0:5432: bind: address already in use'")
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 11, in <module>
load_entry_point('docker-compose==1.25.0', 'console_scripts', 'docker-compose')()
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 72, in main
command()
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 128, in perform_command
handler(command, command_options)
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 896, in run
run_one_off_container(
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 1343, in run_one_off_container
project.up(
File "/usr/lib/python3/dist-packages/compose/project.py", line 565, in up
results, errors = parallel.parallel_execute(
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 112, in parallel_execute
raise error_to_reraise
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 210, in producer
result = func(obj)
File "/usr/lib/python3/dist-packages/compose/project.py", line 548, in do
return service.execute_convergence_plan(
File "/usr/lib/python3/dist-packages/compose/service.py", line 567, in execute_convergence_plan
return self._execute_convergence_start(
File "/usr/lib/python3/dist-packages/compose/service.py", line 506, in _execute_convergence_start
_, errors = parallel_execute(
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 112, in parallel_execute
raise error_to_reraise
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 210, in producer
result = func(obj)
File "/usr/lib/python3/dist-packages/compose/service.py", line 508, in <lambda>
lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
File "/usr/lib/python3/dist-packages/compose/service.py", line 620, in start_container_if_stopped
return self.start_container(container)
File "/usr/lib/python3/dist-packages/compose/service.py", line 627, in start_container
if "driver failed programming external connectivity" in ex.explanation:
TypeError: a bytes-like object is required, not 'str'
I'm running a Python script that is supposed to listen for tweets with a specific hashtag (and then execute some code).
My code runs almost 100% correctly, but I still have one issue: the program stops at random times for "no reason".
Here is the error:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/http/client.py", line 551, in _get_chunk_left
chunk_left = self._read_next_chunk_size()
File "/usr/local/lib/python3.8/http/client.py", line 518, in _read_next_chunk_size
return int(line, 16)
ValueError: invalid literal for int() with base 16: b''
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/http/client.py", line 583, in _readinto_chunked
chunk_left = self._get_chunk_left()
File "/usr/local/lib/python3.8/http/client.py", line 553, in _get_chunk_left
raise IncompleteRead(b'')
http.client.IncompleteRead: IncompleteRead(0 bytes read)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/urllib3/response.py", line 436, in _error_catcher
yield
File "/usr/local/lib/python3.8/site-packages/urllib3/response.py", line 518, in read
data = self._fp.read(amt) if not fp_closed else b""
File "/usr/local/lib/python3.8/http/client.py", line 454, in read
n = self.readinto(b)
File "/usr/local/lib/python3.8/http/client.py", line 488, in readinto
return self._readinto_chunked(b)
File "/usr/local/lib/python3.8/http/client.py", line 599, in _readinto_chunked
raise IncompleteRead(bytes(b[0:total_bytes]))
http.client.IncompleteRead: IncompleteRead(292 bytes read)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "main.py", line 133, in <module>
myStream.filter(track=['#DownDropship'])
File "/usr/local/lib/python3.8/site-packages/tweepy/streaming.py", line 474, in filter
self._start(is_async)
File "/usr/local/lib/python3.8/site-packages/tweepy/streaming.py", line 389, in _start
self._run()
File "/usr/local/lib/python3.8/site-packages/tweepy/streaming.py", line 320, in _run
six.reraise(*exc_info)
File "/usr/local/lib/python3.8/site-packages/six.py", line 703, in reraise
raise value
File "/usr/local/lib/python3.8/site-packages/tweepy/streaming.py", line 289, in _run
self._read_loop(resp)
File "/usr/local/lib/python3.8/site-packages/tweepy/streaming.py", line 339, in _read_loop
line = buf.read_line()
File "/usr/local/lib/python3.8/site-packages/tweepy/streaming.py", line 200, in read_line
self._buffer += self._stream.read(self._chunk_size)
File "/usr/local/lib/python3.8/site-packages/urllib3/response.py", line 540, in read
raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
File "/usr/local/lib/python3.8/contextlib.py", line 131, in __exit__
self.gen.throw(type, value, traceback)
File "/usr/local/lib/python3.8/site-packages/urllib3/response.py", line 454, in _error_catcher
raise ProtocolError("Connection broken: %r" % e, e)
urllib3.exceptions.ProtocolError: ('Connection broken: IncompleteRead(292 bytes read)', IncompleteRead(292 bytes read))
Here is a piece of my code:
from tweepy import StreamListener
from retrying import retry
import tweepy

#retry
class MyStreamListener(StreamListener):
    #retry
    def on_status(self, status):
        try:
            pass  # do something ...
        except Exception as e:
            pass

myStreamListener = MyStreamListener()
myStream = tweepy.Stream(auth=twitter_handler.api.auth, listener=myStreamListener)
myStream.filter(track=['#Something'])
I found these "#retry" lines online, but apparently they don't work properly.
Thanks for your help.
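One thing worth noting (a sketch under assumptions, not a verified fix): the traceback shows the ProtocolError propagating out of myStream.filter() itself, so a retry decorator on on_status() never sees it. Assuming the tweepy 3.x API used in the snippet above, the blocking filter() call can instead be restarted in a loop:

import urllib3

# Sketch only: reuse the myStream object defined above and reconnect
# whenever the streaming connection is broken mid-read.
while True:
    try:
        myStream.filter(track=['#Something'])
    except urllib3.exceptions.ProtocolError:
        continue  # connection broken (IncompleteRead); reconnect and keep listening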
I found an interesting piece of code on gist.github.com.
It works great if the remote network responds fast enough with the default timeout=0.1. However, in a real-life application this often needs to be raised, and when scanning a network range with the timeout raised to just 0.2, the script throws the error below almost every time.
Looking at the code, it seems the script is supposed to ignore such errors, but it actually quits immediately.
except (asyncio.TimeoutError, ConnectionRefusedError):
    pass
else:
    yield task  # yield successful tasks
Here's the full error I get:
unhandled exception during asyncio.run() shutdown
task: <Task finished coro=<<async_generator_athrow without __name__>()> exception=RuntimeError("can't send non-None value to a just-started coroutine")>
Traceback (most recent call last):
File "/usr/local/lib/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
File "port.py", line 130, in main
async for task in streamer:
File "/usr/local/lib/python3.7/site-packages/aiostream/stream/advanced.py", line 43, in base_combine
result = getter()
File "port.py", line 79, in task_worker
await asyncio.wait_for(conn, task.timeout)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 416, in wait_for
return fut.result()
File "/usr/local/lib/python3.7/asyncio/streams.py", line 77, in open_connection
lambda: protocol, host, port, **kwds)
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 959, in create_connection
raise exceptions[0]
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 946, in create_connection
await self.sock_connect(sock, address)
File "/usr/local/lib/python3.7/asyncio/selector_events.py", line 464, in sock_connect
return await fut
File "/usr/local/lib/python3.7/asyncio/selector_events.py", line 494, in _sock_connect_cb
raise OSError(err, f'Connect call failed {address}')
OSError: [Errno 113] Connect call failed ('184.183.182.181', 8001)
During handling of the above exception, another exception occurred:
RuntimeError: can't send non-None value to a just-started coroutine
Traceback (most recent call last):
File "port.py", line 153, in <module>
asyncio.run(main(sys.argv[1], ''.join(sys.argv[2:])))
File "/usr/local/lib/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
File "port.py", line 130, in main
async for task in streamer:
File "/usr/local/lib/python3.7/site-packages/aiostream/stream/advanced.py", line 43, in base_combine
result = getter()
File "port.py", line 79, in task_worker
await asyncio.wait_for(conn, task.timeout)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 416, in wait_for
return fut.result()
File "/usr/local/lib/python3.7/asyncio/streams.py", line 77, in open_connection
lambda: protocol, host, port, **kwds)
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 959, in create_connection
raise exceptions[0]
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 946, in create_connection
await self.sock_connect(sock, address)
File "/usr/local/lib/python3.7/asyncio/selector_events.py", line 464, in sock_connect
return await fut
File "/usr/local/lib/python3.7/asyncio/selector_events.py", line 494, in _sock_connect_cb
raise OSError(err, f'Connect call failed {address}')
OSError: [Errno 113] Connect call failed ('184.183.182.181', 8001)
The goal is to ignore the error and to complete the loop.
Any input appreciated.
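For what it's worth, the traceback ends in a plain OSError (errno 113), while the except tuple in the snippet only names asyncio.TimeoutError and ConnectionRefusedError; ConnectionRefusedError is a subclass of OSError, but other OSErrors fall through. A self-contained sketch of the idea (try_port is a hypothetical helper, not the gist's code):

import asyncio

async def try_port(host: str, port: int, timeout: float = 0.2):
    """Return (host, port) if the port accepts a TCP connection, else None."""
    try:
        conn = asyncio.open_connection(host, port)
        reader, writer = await asyncio.wait_for(conn, timeout)
    except (asyncio.TimeoutError, OSError):
        # OSError covers ConnectionRefusedError, errno 113, and similar failures
        return None
    writer.close()
    return (host, port)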
I'm using Python 3.4 on a Raspberry Pi to upload weather data to a website. Sometimes there's a problem uploading (slow Internet or something) and my program crashes. I'm using try/except, but for some reason it's not catching the error. I thought the last except statement would catch any other errors.
Here's the error:
Traceback (most recent call last):
File "/usr/local/lib/python3.4/dist-packages/urllib3/connectionpool.py", line 343, in _make_request
self._validate_conn(conn)
File "/usr/local/lib/python3.4/dist-packages/urllib3/connectionpool.py", line 839, in _validate_conn
conn.connect()
File "/usr/local/lib/python3.4/dist-packages/urllib3/connection.py", line 344, in connect
ssl_context=context)
File "/usr/local/lib/python3.4/dist-packages/urllib3/util/ssl_.py", line 344, in ssl_wrap_socket
return context.wrap_socket(sock, server_hostname=server_hostname)
File "/usr/lib/python3.4/ssl.py", line 364, in wrap_socket
_context=self)
File "/usr/lib/python3.4/ssl.py", line 577, in __init__
self.do_handshake()
File "/usr/lib/python3.4/ssl.py", line 804, in do_handshake
self._sslobj.do_handshake()
socket.timeout: _ssl.c:584: The handshake operation timed out
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.4/dist-packages/requests/adapters.py", line 449, in send
timeout=timeout
File "/usr/local/lib/python3.4/dist-packages/urllib3/connectionpool.py", line 638, in urlopen
_stacktrace=sys.exc_info()[2])
File "/usr/local/lib/python3.4/dist-packages/urllib3/util/retry.py", line 367, in increment
raise six.reraise(type(error), error, _stacktrace)
File "/usr/local/lib/python3.4/dist-packages/urllib3/packages/six.py", line 686, in reraise
raise value
File "/usr/local/lib/python3.4/dist-packages/urllib3/connectionpool.py", line 600, in urlopen
chunked=chunked)
File "/usr/local/lib/python3.4/dist-packages/urllib3/connectionpool.py", line 346, in _make_request
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
File "/usr/local/lib/python3.4/dist-packages/urllib3/connectionpool.py", line 306, in _raise_timeout
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
urllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='rtupdate.wunderground.com', port=443): Read timed out. (read timeout=5)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/pi/Weather_Station/WU_upload.py", line 52, in upload2WU
r = requests.get(full_URL, timeout=5) # send data to WU
File "/usr/local/lib/python3.4/dist-packages/requests/api.py", line 75, in get
return request('get', url, params=params, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/requests/api.py", line 60, in request
return session.request(method=method, url=url, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/requests/sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "/usr/local/lib/python3.4/dist-packages/requests/sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/requests/adapters.py", line 529, in send
raise ReadTimeout(e, request=request)
requests.exceptions.ReadTimeout: HTTPSConnectionPool(host='rtupdate.wunderground.com', port=443): Read timed out. (read timeout=5)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/pi/Weather_Station/Weather_Station.py", line 381, in <module>
uploadStatus = WU_upload.upload2WU(suntec, WU_STATION)
File "/home/pi/Weather_Station/WU_upload.py", line 65, in upload2WU
except requests.exceptions.NewConnectionError:
AttributeError: 'module' object has no attribute 'NewConnectionError'
>>>
Here's my code:
try:
    r = requests.get(full_URL, timeout=5)  # send data to WU
    # If uploaded successfully, website will reply with 200
    if r.status_code == 200:
        return(True)
    else:
        print('Upload Error: {} {}'.format(r.status_code, r.text))
        return(False)
except requests.exceptions.ConnectionError:
    print("Upload Error in upload2WU() - ConnectionError")
    return(False)
except requests.exceptions.NewConnectionError:
    print("Upload Error in upload2WU() - NewConnectionError")
    return(False)
except requests.exceptions.ReadTimeout:
    print("Upload Error in upload2WU() - ReadTimeout")
    return(False)
except requests.exceptions.MaxRetryError:
    print("Upload Error in upload2WU() - MaxRetryError")
    return(False)
except socket.gaierror:
    print("Upload Error in upload2WU() - socket.gaierror")
    return(False)
except:
    print("Upload Error in upload2WU() - other")
    return(False)
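A side note on the traceback above: the AttributeError comes from the except clause itself, because this version of requests.exceptions has no NewConnectionError attribute, so the original ReadTimeout never reaches the later handlers. A minimal sketch (not the poster's code) that leans on requests.exceptions.RequestException, the common base class of ConnectionError, ReadTimeout and friends:

import requests

def upload2WU(full_URL):
    """Hypothetical rewrite of the upload helper shown above."""
    try:
        r = requests.get(full_URL, timeout=5)  # send data to WU
    except requests.exceptions.RequestException as e:
        # one handler covers ConnectionError, ReadTimeout, etc.
        print("Upload Error in upload2WU() - {}".format(type(e).__name__))
        return False
    if r.status_code == 200:
        return True
    print('Upload Error: {} {}'.format(r.status_code, r.text))
    return False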
This problem happens rarely, i.e. only with specific images. How should I avoid it?
This does work:
response = requests.get(url='http://1.bp.blogspot.com/-XFloD6F3Tws/VPW9r8e3fzI/AAAAAAAH77c/izzZRWSbGdk/s1600/16467_1197957903548615_3921728777903612927_n.jpg', stream=True, timeout=60)
response.text
but this does not:
response = requests.get(url='http://1.bp.blogspot.com/-6SnMAxnWLKM/VPTIf5AMcDI/AAAAAAAH7zA/wNRxXAcH_e4/s1600/Ανώνυμο-1.jpg', stream=True, timeout=60)
response.text
In fact, it returns the following error:
Traceback (most recent call last):
File "/home/eualin/.bin/anaconda3/lib/python3.4/site-packages/requests/packages/urllib3/response.py", line 186, in read
data = self._fp.read(amt)
File "/home/eualin/.bin/anaconda3/lib/python3.4/http/client.py", line 500, in read
return super(HTTPResponse, self).read(amt)
File "/home/eualin/.bin/anaconda3/lib/python3.4/http/client.py", line 539, in readinto
n = self.fp.readinto(b)
File "/home/eualin/.bin/anaconda3/lib/python3.4/socket.py", line 371, in readinto
return self._sock.recv_into(b)
socket.timeout: timed out
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/eualin/.bin/anaconda3/lib/python3.4/site-packages/requests/models.py", line 638, in generate
for chunk in self.raw.stream(chunk_size, decode_content=True):
File "/home/eualin/.bin/anaconda3/lib/python3.4/site-packages/requests/packages/urllib3/response.py", line 256, in stream
data = self.read(amt=amt, decode_content=decode_content)
File "/home/eualin/.bin/anaconda3/lib/python3.4/site-packages/requests/packages/urllib3/response.py", line 201, in read
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
requests.packages.urllib3.exceptions.ReadTimeoutError: HTTPConnectionPool(host='1.bp.blogspot.com', port=80): Read timed out.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/eualin/.bin/anaconda3/lib/python3.4/site-packages/requests/models.py", line 734, in text
if not self.content:
File "/home/eualin/.bin/anaconda3/lib/python3.4/site-packages/requests/models.py", line 707, in content
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
File "/home/eualin/.bin/anaconda3/lib/python3.4/site-packages/requests/models.py", line 645, in generate
raise ConnectionError(e)
requests.exceptions.ConnectionError: HTTPConnectionPool(host='1.bp.blogspot.com', port=80): Read timed out.
EDIT
This is the error I get when I apply .encode("utf-8") to the URL string.
>>> response = requests.get(url='http://1.bp.blogspot.com/-6SnMAxnWLKM/VPTIf5AMcDI/AAAAAAAH7zA/wNRxXAcH_e4/s1600/Ανώνυμο-1.jpg'.encode("utf-8"), stream=True, timeout=60)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/eualin/.bin/anaconda3/lib/python3.4/site-packages/requests/api.py", line 59, in get
return request('get', url, **kwargs)
File "/home/eualin/.bin/anaconda3/lib/python3.4/site-packages/requests/api.py", line 48, in request
return session.request(method=method, url=url, **kwargs)
File "/home/eualin/.bin/anaconda3/lib/python3.4/site-packages/requests/sessions.py", line 451, in request
resp = self.send(prep, **send_kwargs)
File "/home/eualin/.bin/anaconda3/lib/python3.4/site-packages/requests/sessions.py", line 551, in send
adapter = self.get_adapter(url=request.url)
File "/home/eualin/.bin/anaconda3/lib/python3.4/site-packages/requests/sessions.py", line 630, in get_adapter
raise InvalidSchema("No connection adapters were found for '%s'" % url)
requests.exceptions.InvalidSchema: No connection adapters were found for 'b'http://1.bp.blogspot.com/-6SnMAxnWLKM/VPTIf5AMcDI/AAAAAAAH7zA/wNRxXAcH_e4/s1600/\xce\x91\xce\xbd\xcf\x8e\xce\xbd\xcf\x85\xce\xbc\xce\xbf-1.jpg''
>>>
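Encoding the whole URL to bytes changes its type, so requests ends up looking for an adapter for the literal string b'http://...', hence the InvalidSchema error above. One possible workaround, assuming the non-ASCII characters in the path are the culprit, is to percent-encode just the path and keep the URL a str; a sketch:

from urllib.parse import quote, urlsplit, urlunsplit

import requests

url = 'http://1.bp.blogspot.com/-6SnMAxnWLKM/VPTIf5AMcDI/AAAAAAAH7zA/wNRxXAcH_e4/s1600/Ανώνυμο-1.jpg'

# Percent-encode only the path component; scheme and host stay untouched.
parts = urlsplit(url)
safe_url = urlunsplit((parts.scheme, parts.netloc, quote(parts.path), parts.query, parts.fragment))

response = requests.get(url=safe_url, stream=True, timeout=60)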