I am trying to make a program that searches Amazon using the amazon simple-product API, but it raises an HTTP 410: Gone error.
Here is the code.
from amazon.api import AmazonAPI

# Credentials are masked here.
amazon = AmazonAPI('A********************A', 'X**************************m',
                   '1*******************0')

a = input(':')
results = amazon.search(Keywords=a, SearchIndex="Books")
for item in results:
    print(item.title, item.isbn, item.price_and_currency)
This is the error:
Traceback (most recent call last):
File "C:\Users\susheel\Desktop\booksearch.py", line 12, in <module>
for item in results:
File "C:\Users\susheel\AppData\Local\Programs\Python\Python38\lib\site-packages\amazon\api.py", line
544, in __iter__
for page in self.iterate_pages():
File "C:\Users\susheel\AppData\Local\Programs\Python\Python38\lib\site-packages\amazon\api.py", line
561, in iterate_pages
yield self._query(ItemPage=self.current_page, **self.kwargs)
File "C:\Users\susheel\AppData\Local\Programs\Python\Python38\lib\site-packages\amazon\api.py", line
573, in _query
response = self.api.ItemSearch(ResponseGroup=ResponseGroup, **kwargs)
File "C:\Users\susheel\AppData\Local\Programs\Python\Python38\lib\site-packages\bottlenose\api.py",
line 273, in __call__
response = self._call_api(api_url,
File "C:\Users\susheel\AppData\Local\Programs\Python\Python38\lib\site-packages\bottlenose\api.py",
line 235, in _call_api
return urllib2.urlopen(api_request, timeout=self.Timeout)
File "C:\Users\susheel\AppData\Local\Programs\Python\Python38\lib\urllib\request.py", line 222, in
urlopen
return opener.open(url, data, timeout)
File "C:\Users\susheel\AppData\Local\Programs\Python\Python38\lib\urllib\request.py", line 531, in
open
response = meth(req, response)
File "C:\Users\susheel\AppData\Local\Programs\Python\Python38\lib\urllib\request.py", line 640, in
http_response
response = self.parent.error(
File "C:\Users\susheel\AppData\Local\Programs\Python\Python38\lib\urllib\request.py", line 569, in
error
return self._call_chain(*args)
File "C:\Users\susheel\AppData\Local\Programs\Python\Python38\lib\urllib\request.py", line 502, in
_call_chain
result = func(*args)
File "C:\Users\susheel\AppData\Local\Programs\Python\Python38\lib\urllib\request.py", line 649, in
http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 410: Gone
Please help me out.
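This does not explain the 410 by itself, but catching the HTTPError and printing the server's full response often says more than the traceback. A minimal sketch (not part of the original script), using the same amazon client and search term as above:

import urllib.error

try:
    results = amazon.search(Keywords=a, SearchIndex="Books")
    for item in results:
        print(item.title, item.isbn, item.price_and_currency)
except urllib.error.HTTPError as e:
    # Status code, headers and body of the 410 response from the endpoint.
    print(e.code, e.reason)
    print(e.headers)
    print(e.read())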
I am using the python-eureka-client to register my Python service with Eureka and send and receive REST calls.
I sometimes get the error shown below when my Python service is under very heavy load. The service tries to send its result back to another service using this call:
res = eureka_client.do_service(
    app_name=SCIARA_EMISSIONS_SERVICE,
    service='/some-address/',
    method="PUT",
    headers={'Content-Type': 'application/json', 'Authorization': jwt_id_token},
    data=json_term.encode("utf-8"),
    timeout=10
)
This usually works, but every once in a while it throws the error below. I have already tried increasing the timeout, to no avail.
Do you have any other suggestions as to what may be going on?
Thanks, Tim
Versions:
Python 3.9.6
py_eureka_client==0.11.3
spring-config-client==0.2
Error listing:
Traceback (most recent call last):
File "/usr/src/app/api.py", line 43, in send_result
res = eureka_client.do_service(
File "/usr/local/lib/python3.9/site-packages/py_eureka_client/eureka_client.py", line 1310, in do_service
return get_event_loop().run_until_complete(do_service_async(app_name=app_name, service=service, return_type=return_type,
File "/usr/local/lib/python3.9/asyncio/base_events.py", line 642, in run_until_complete
return future.result()
File "/usr/local/lib/python3.9/site-packages/py_eureka_client/eureka_client.py", line 1180, in do_service_async
res = await cli.do_service(app_name=app_name, service=service, return_type=return_type,
File "/usr/local/lib/python3.9/site-packages/py_eureka_client/eureka_client.py", line 937, in do_service
return await self.walk_nodes(app_name, service, prefer_ip, prefer_https, walk_using_urllib)
File "/usr/local/lib/python3.9/site-packages/py_eureka_client/eureka_client.py", line 894, in walk_nodes
return await obj
File "/usr/local/lib/python3.9/site-packages/py_eureka_client/eureka_client.py", line 929, in walk_using_urllib
res: http_client.HttpResponse = await http_client.http_client.urlopen(
File "/usr/local/lib/python3.9/site-packages/py_eureka_client/http_client.py", line 148, in urlopen
res = urllib.request.urlopen(req._to_urllib_request(), data=data, timeout=timeout)
File "/usr/local/lib/python3.9/urllib/request.py", line 214, in urlopen
return opener.open(url, data, timeout)
File "/usr/local/lib/python3.9/urllib/request.py", line 523, in open
response = meth(req, response)
File "/usr/local/lib/python3.9/urllib/request.py", line 632, in http_response
response = self.parent.error(
File "/usr/local/lib/python3.9/urllib/request.py", line 561, in error
return self._call_chain(*args)
File "/usr/local/lib/python3.9/urllib/request.py", line 494, in _call_chain
result = func(*args)
File "/usr/local/lib/python3.9/urllib/request.py", line 641, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 401: Unauthorized
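To find out why the request is rejected under load, it can help to catch the HTTPError and log the response details before re-raising. A minimal sketch (not from the original service) around the same do_service call:

import logging
import urllib.error

import py_eureka_client.eureka_client as eureka_client

# SCIARA_EMISSIONS_SERVICE, jwt_id_token and json_term are defined as in the question.
try:
    res = eureka_client.do_service(
        app_name=SCIARA_EMISSIONS_SERVICE,
        service='/some-address/',
        method="PUT",
        headers={'Content-Type': 'application/json', 'Authorization': jwt_id_token},
        data=json_term.encode("utf-8"),
        timeout=10
    )
except urllib.error.HTTPError as e:
    # The body of the 401 usually says whether the token was rejected, expired,
    # or never reached the authorization filter.
    logging.error("PUT failed with %s %s: %s", e.code, e.reason, e.read())
    raise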
I am trying to run the following code in order to find the word that comes right after "examination" in a text.
The input is a PDF that I try to convert to text using tika.
The code throws a tika-related error that I do not understand.
Does anyone know how to fix it, or know another way to solve my problem?
import re
from tika import parser

# Extract the PDF text with Tika; from_file returns a dict with a 'content' key.
raw = parser.from_file('application0001.pdf')
print(raw['content'])

list_of_words = raw['content'].split()
search = "examination"
next_word = list_of_words[list_of_words.index(search) + 1]
print(next_word)
This is the error I get when running it, and I do not understand what it means.
2019-05-24 09:53:53,217 [MainThread ] [INFO ] Retrieving http://search.maven.org/remotecontent?filepath=org/apache/tika/tika-server/1.19/tika-server-1.19.jar to /var/folders/xn/p33pzhs179n33z55z66lqcn00000gn/T/tika-server.jar.
Traceback (most recent call last):
File "/Users/Mauritius/anaconda3/lib/python3.6/site-packages/tika/tika.py", line 716, in getRemoteJar
urlretrieve(urlOrPath, destPath)
File "/Users/Mauritius/anaconda3/lib/python3.6/urllib/request.py", line 248, in urlretrieve
with contextlib.closing(urlopen(url, data)) as fp:
File "/Users/Mauritius/anaconda3/lib/python3.6/urllib/request.py", line 223, in urlopen
return opener.open(url, data, timeout)
File "/Users/Mauritius/anaconda3/lib/python3.6/urllib/request.py", line 532, in open
response = meth(req, response)
File "/Users/Mauritius/anaconda3/lib/python3.6/urllib/request.py", line 642, in http_response
'http', request, response, code, msg, hdrs)
File "/Users/Mauritius/anaconda3/lib/python3.6/urllib/request.py", line 570, in error
return self._call_chain(*args)
File "/Users/Mauritius/anaconda3/lib/python3.6/urllib/request.py", line 504, in _call_chain
result = func(*args)
File "/Users/Mauritius/anaconda3/lib/python3.6/urllib/request.py", line 650, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 503: Service Unavailable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/Mauritius/Desktop/text_search.py", line 7, in <module>
raw = parser.from_file('application0001.pdf')
File "/Users/Mauritius/anaconda3/lib/python3.6/site-packages/tika/parser.py", line 36, in from_file
jsonOutput = parse1('all', filename, serverEndpoint, headers=headers, config_path=config_path)
File "/Users/Mauritius/anaconda3/lib/python3.6/site-packages/tika/tika.py", line 328, in parse1
headers, verbose, tikaServerJar, config_path=config_path, rawResponse=rawResponse)
File "/Users/Mauritius/anaconda3/lib/python3.6/site-packages/tika/tika.py", line 522, in callServer
serverEndpoint = checkTikaServer(scheme, serverHost, port, tikaServerJar, classpath, config_path)
File "/Users/Mauritius/anaconda3/lib/python3.6/site-packages/tika/tika.py", line 571, in checkTikaServer
getRemoteJar(tikaServerJar, jarPath)
File "/Users/Mauritius/anaconda3/lib/python3.6/site-packages/tika/tika.py", line 726, in getRemoteJar
urlretrieve(urlOrPath, destPath)
File "/Users/Mauritius/anaconda3/lib/python3.6/urllib/request.py", line 248, in urlretrieve
with contextlib.closing(urlopen(url, data)) as fp:
File "/Users/Mauritius/anaconda3/lib/python3.6/urllib/request.py", line 223, in urlopen
return opener.open(url, data, timeout)
File "/Users/Mauritius/anaconda3/lib/python3.6/urllib/request.py", line 532, in open
response = meth(req, response)
File "/Users/Mauritius/anaconda3/lib/python3.6/urllib/request.py", line 642, in http_response
'http', request, response, code, msg, hdrs)
File "/Users/Mauritius/anaconda3/lib/python3.6/urllib/request.py", line 570, in error
return self._call_chain(*args)
File "/Users/Mauritius/anaconda3/lib/python3.6/urllib/request.py", line 504, in _call_chain
result = func(*args)
File "/Users/Mauritius/anaconda3/lib/python3.6/urllib/request.py", line 650, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 503: Service Unavailable
[Finished in 2.7s with exit code 1]
[shell_cmd: python -u "/Users/Mauritius/Desktop/text_search.py"]
[dir: /Users/Mauritius/Desktop]
[path: /Users/Mauritius/miniconda3/bin:/opt/local/bin:/opt/local/sbin:/Users/Mauritius/anaconda3/bin:/Library/Frameworks/Python.framework/Versions/3.5/bin://anaconda/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/Library/TeX/texbin]
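Separately from the 503 (which happens while tika tries to download its server jar from Maven, as the first log line shows), the lookup itself can be done with the re module the script already imports. A small sketch, assuming raw is the dict returned by parser.from_file above, that collects the word after every occurrence of "examination" instead of only the first:

import re

# content can be None if Tika extracted nothing, so fall back to an empty string.
text = raw['content'] or ''

# One word after every occurrence of "examination", case-insensitive.
following_words = re.findall(r'examination\s+(\w+)', text, flags=re.IGNORECASE)
print(following_words)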
This is my first attempt with Python. I'm trying to use an external library for XML parsing with Python 3.6.
I'm getting an error which doesn't seem to have anything to do with my code, and I can't figure out what the problem is from the error output.
my code:
import untangle
x = untangle.parse(r"C:\file.xml")
error:
Traceback (most recent call last):
File "C:/Project/Main.py", line 2, in <module>
x = untangle.parse(r"C:\file.xml")
File "C:\Users\user\AppData\Local\Programs\Python\Python36\lib\site-packages\untangle.py", line 177, in parse
parser.parse(filename)
File "C:\Users\user\AppData\Local\Programs\Python\Python36\lib\xml\sax\expatreader.py", line 111, in parse
xmlreader.IncrementalParser.parse(self, source)
File "C:\Users\user\AppData\Local\Programs\Python\Python36\lib\xml\sax\xmlreader.py", line 125, in parse
self.feed(buffer)
File "C:\Users\user\AppData\Local\Programs\Python\Python36\lib\xml\sax\expatreader.py", line 217, in feed
self._parser.Parse(data, isFinal)
File "..\Modules\pyexpat.c", line 668, in ExternalEntityRef
File "C:\Users\user\AppData\Local\Programs\Python\Python36\lib\xml\sax\expatreader.py", line 413, in external_entity_ref
"")
File "C:\Users\user\AppData\Local\Programs\Python\Python36\lib\xml\sax\saxutils.py", line 364, in prepare_input_source
f = urllib.request.urlopen(source.getSystemId())
File "C:\Users\user\AppData\Local\Programs\Python\Python36\lib\urllib\request.py", line 223, in urlopen
return opener.open(url, data, timeout)
File "C:\Users\user\AppData\Local\Programs\Python\Python36\lib\urllib\request.py", line 532, in open
response = meth(req, response)
File "C:\Users\user\AppData\Local\Programs\Python\Python36\lib\urllib\request.py", line 642, in http_response
'http', request, response, code, msg, hdrs)
File "C:\Users\user\AppData\Local\Programs\Python\Python36\lib\urllib\request.py", line 570, in error
return self._call_chain(*args)
File "C:\Users\user\AppData\Local\Programs\Python\Python36\lib\urllib\request.py", line 504, in _call_chain
result = func(*args)
File "C:\Users\user\AppData\Local\Programs\Python\Python36\lib\urllib\request.py", line 650, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 403: Forbidden
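The traceback shows that the expat-based SAX reader behind untangle is trying to download an external entity referenced by the file (typically a DTD), and that request is refused with 403. As an illustration of that mechanism only, a stdlib-only sketch that parses the same file with external general entity loading switched off (the handler here is a hypothetical stand-in, not untangle's):

import xml.sax
from xml.sax.handler import ContentHandler, feature_external_ges

class PrintTags(ContentHandler):
    # Hypothetical minimal handler: just prints each element name.
    def startElement(self, name, attrs):
        print(name)

parser = xml.sax.make_parser()
parser.setContentHandler(PrintTags())
# With this feature off, the parser no longer fetches external entities over HTTP.
parser.setFeature(feature_external_ges, False)
parser.parse(r"C:\file.xml")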
The web page has a huge list of journal names along with other details. I am trying to scrape the table content into a dataframe.
#http://www.citefactor.org/journal-impact-factor-list-2015.html
import bs4 as bs
import urllib #Using python 2.7
import pandas as pd
dfs = pd.read_html('http://www.citefactor.org/journal-impact-factor-list-2015.html/', header=0)
for df in dfs:
    print(df)
    df.to_csv('citefactor_list.csv', header=True)
But I am getting the following error. I tried referring to some previously asked questions but could not fix it.
Error:
Traceback (most recent call last):
File "scrape_impact_factor.py", line 7, in <module>
dfs = pd.read_html('http://www.citefactor.org/journal-impact-factor-list-2015.html/', header=0)
File "/usr/local/lib/python2.7/dist-packages/pandas/io/html.py", line 896, in read_html
keep_default_na=keep_default_na)
File "/usr/local/lib/python2.7/dist-packages/pandas/io/html.py", line 733, in _parse
raise_with_traceback(retained)
File "/usr/local/lib/python2.7/dist-packages/pandas/io/html.py", line 727, in _parse
tables = p.parse_tables()
File "/usr/local/lib/python2.7/dist-packages/pandas/io/html.py", line 196, in parse_tables
tables = self._parse_tables(self._build_doc(), self.match, self.attrs)
File "/usr/local/lib/python2.7/dist-packages/pandas/io/html.py", line 450, in _build_doc
return BeautifulSoup(self._setup_build_doc(), features='html5lib',
File "/usr/local/lib/python2.7/dist-packages/pandas/io/html.py", line 443, in _setup_build_doc
raw_text = _read(self.io)
File "/usr/local/lib/python2.7/dist-packages/pandas/io/html.py", line 130, in _read
with urlopen(obj) as url:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/pandas/io/common.py", line 60, in urlopen
with closing(_urlopen(*args, **kwargs)) as f:
File "/usr/lib/python2.7/urllib2.py", line 127, in urlopen
return _opener.open(url, data, timeout)
File "/usr/lib/python2.7/urllib2.py", line 410, in open
response = meth(req, response)
File "/usr/lib/python2.7/urllib2.py", line 523, in http_response
'http', request, response, code, msg, hdrs)
File "/usr/lib/python2.7/urllib2.py", line 448, in error
return self._call_chain(*args)
File "/usr/lib/python2.7/urllib2.py", line 382, in _call_chain
result = func(*args)
File "/usr/lib/python2.7/urllib2.py", line 531, in http_error_default
raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
urllib2.HTTPError: HTTP Error 500: Internal Server Error
A 500 internal server error means something went wrong on the server and therefore is out of your control.
However, the problem here is that you are using the wrong URL.
If you go to http://www.citefactor.org/journal-impact-factor-list-2015.html/ in your browser you get a 404 Not Found error. Remove the trailing slash, i.e. use http://www.citefactor.org/journal-impact-factor-list-2015.html, and it will work.
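For completeness, a sketch of the question's code with only the URL corrected (what columns come back depends on what the site serves):

import pandas as pd

# Same call as in the question, minus the trailing slash on the URL.
dfs = pd.read_html('http://www.citefactor.org/journal-impact-factor-list-2015.html', header=0)
for df in dfs:
    print(df)
    df.to_csv('citefactor_list.csv', header=True)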
I'm using Django and I'm trying to use the 'translate' or 'goslate' library so I can translate text with Google Translate at runtime, for free.
For goslate, this is my function:
import goslate
gs = goslate.Goslate()
translate = gs.translate(txt, target, source)
When I work locally it works great and I get the translation for the given 'txt'.
When I deploy my Django app to herokuapp.com, I get an error.
This is the error:
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/app/.heroku/python/lib/python2.7/site-packages/goslate.py", line 409, in translate
return _unwrapper_single_element(self._translate_single_text(text, target_language, source_language))
File "/app/.heroku/python/lib/python2.7/site-packages/goslate.py", line 334, in _translate_single_text
results = list(self._execute(make_task(i) for i in split_text(text)))
File "/app/.heroku/python/lib/python2.7/site-packages/goslate.py", line 203, in _execute
yield each()
File "/app/.heroku/python/lib/python2.7/site-packages/goslate.py", line 332, in <lambda>
return lambda: self._basic_translate(text, target_language, source_lauguage)[0]
File "/app/.heroku/python/lib/python2.7/site-packages/goslate.py", line 251, in _basic_translate
response_content = self._open_url(url)
File "/app/.heroku/python/lib/python2.7/site-packages/goslate.py", line 181, in _open_url
response = self._opener.open(request, timeout=self._TIMEOUT)
File "/app/.heroku/python/lib/python2.7/urllib2.py", line 437, in open
response = meth(req, response)
File "/app/.heroku/python/lib/python2.7/urllib2.py", line 550, in http_response
'http', request, response, code, msg, hdrs)
File "/app/.heroku/python/lib/python2.7/urllib2.py", line 469, in error
result = self._call_chain(*args)
File "/app/.heroku/python/lib/python2.7/urllib2.py", line 409, in _call_chain
result = func(*args)
File "/app/.heroku/python/lib/python2.7/urllib2.py", line 656, in http_error_302
return self.parent.open(new, timeout=req.timeout)
File "/app/.heroku/python/lib/python2.7/urllib2.py", line 437, in open
response = meth(req, response)
File "/app/.heroku/python/lib/python2.7/urllib2.py", line 550, in http_response
'http', request, response, code, msg, hdrs)
File "/app/.heroku/python/lib/python2.7/urllib2.py", line 475, in error
return self._call_chain(*args)
File "/app/.heroku/python/lib/python2.7/urllib2.py", line 409, in _call_chain
result = func(*args)
File "/app/.heroku/python/lib/python2.7/urllib2.py", line 558, in http_error_default
raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
HTTPError: HTTP Error 503: Service Unavailable
Why does it work locally but not on Heroku? How can I fix it?
Or can you suggest another translation library that is free?
I found the problem:
Google Translate blocks the requests coming from Heroku.
I need to use a proxy server so Google Translate will not think I'm a robot.
There is a free add-on on Heroku named "Fixie"; I think it will do the trick.
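A sketch of how that might be wired together. Both the opener parameter of goslate.Goslate and the FIXIE_URL environment variable are assumptions here and should be checked against the goslate and Fixie documentation:

import os
import urllib2
import goslate

# Assumption: Fixie exposes its proxy address via FIXIE_URL (http://user:password@host:port).
proxy_url = os.environ.get('FIXIE_URL', '')
opener = urllib2.build_opener(urllib2.ProxyHandler({'http': proxy_url, 'https': proxy_url}))

# Assumption: Goslate accepts a custom urllib2 opener for its HTTP requests.
gs = goslate.Goslate(opener=opener)
translate = gs.translate(txt, target, source)  # txt, target, source as in the question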