I tried to use the calais.py library and ran the following code:
API_KEY ='token'
calais = Calais(api_key=API_KEY, submitter="my app")
print calais.analyze_url('https://www.python.org/download/releases/2.5.1/')
I get the following error:
*ValueError: Invalid request format - the request has missing or invalid parameters*
calais.py is here:
"""
python-calais v.1.4 -- Python interface to the OpenCalais API
Author: Jordan Dimov (jdimov@mlke.net)
Last-Update: 01/12/2009
"""
import httplib, urllib, re
import simplejson as json
from StringIO import StringIO
PARAMS_XML = """
<c:params xmlns:c="http://s.opencalais.com/1/pred/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"> <c:processingDirectives %s> </c:processingDirectives> <c:userDirectives %s> </c:userDirectives> <c:externalMetadata %s> </c:externalMetadata> </c:params>
"""
STRIP_RE = re.compile('<script.*?</script>|<noscript.*?</noscript>|<style.*?</style>', re.IGNORECASE)
__version__ = "1.4"
class AppURLopener(urllib.FancyURLopener):
version = "Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.5) Gecko/2008121623 Ubuntu/8.10 (intrepid)Firefox/3.0.5" # Lie shamelessly to Wikipedia.
urllib._urlopener = AppURLopener()
class Calais():
"""
Python class that knows how to talk to the OpenCalais API. Use the analyze() and analyze_url() methods, which return CalaisResponse objects.
"""
api_key = None
processing_directives = {"contentType":"TEXT/RAW", "outputFormat":"application/json", "reltagBaseURL":None, "calculateRelevanceScore":"true", "enableMetadataType":None, "discardMetadata":None, "omitOutputtingOriginalText":"true"}
user_directives = {"allowDistribution":"false", "allowSearch":"false", "externalID":None}
external_metadata = {}
def __init__(self, api_key, submitter="python-calais client v.%s" % __version__):
self.api_key = api_key
self.user_directives["submitter"]=submitter
def _get_params_XML(self):
return PARAMS_XML % (" ".join('c:%s="%s"' % (k,v) for (k,v) in self.processing_directives.items() if v), " ".join('c:%s="%s"' % (k,v) for (k,v) in self.user_directives.items() if v), " ".join('c:%s="%s"' % (k,v) for (k,v) in self.external_metadata.items() if v))
def rest_POST(self, content):
params = urllib.urlencode({'licenseID':self.api_key, 'content':content, 'paramsXML':self._get_params_XML()})
headers = {"Content-type":"application/x-www-form-urlencoded"}
conn = httplib.HTTPConnection("api.opencalais.com:80")
conn.request("POST", "/enlighten/rest/", params, headers)
response = conn.getresponse()
data = response.read()
conn.close()
return (data)
def get_random_id(self):
"""
Creates a random 10-character ID for your submission.
"""
import string
from random import choice
chars = string.letters + string.digits
np = ""
for i in range(10):
np = np + choice(chars)
return np
def get_content_id(self, text):
"""
Creates a SHA1 hash of the text of your submission.
"""
import hashlib
h = hashlib.sha1()
h.update(text)
return h.hexdigest()
def preprocess_html(self, html):
html = html.replace('\n', '')
html = STRIP_RE.sub('', html)
return html
def analyze(self, content, content_type="TEXT/RAW", external_id=None):
if not (content and len(content.strip())):
return None
self.processing_directives["contentType"]=content_type
if external_id:
self.user_directives["externalID"] = external_id
return CalaisResponse(self.rest_POST(content))
def analyze_url(self, url):
f = urllib.urlopen(url)
html = self.preprocess_html(f.read())
return self.analyze(html, content_type="TEXT/HTML", external_id=url)
def analyze_file(self, fn):
import mimetypes
try:
filetype = mimetypes.guess_type(fn)[0]
except:
raise ValueError("Can not determine file type for '%s'" % fn)
if filetype == "text/plain":
content_type="TEXT/RAW"
f = open(fn)
content = f.read()
f.close()
elif filetype == "text/html":
content_type = "TEXT/HTML"
f = open(fn)
content = self.preprocess_html(f.read())
f.close()
else:
raise ValueError("Only plaintext and HTML files are currently supported. ")
return self.analyze(content, content_type=content_type, external_id=fn)
class CalaisResponse():
"""
Encapsulates a parsed Calais response and provides easy pythonic access to the data.
"""
raw_response = None
simplified_response = None
def __init__(self, raw_result):
try:
self.raw_response = json.load(StringIO(raw_result))
except:
raise ValueError(raw_result)
self.simplified_response = self._simplify_json(self.raw_response)
self.__dict__['doc'] = self.raw_response['doc']
for k,v in self.simplified_response.items():
self.__dict__[k] = v
def _simplify_json(self, json):
result = {}
# First, resolve references
for element in json.values():
for k,v in element.items():
if isinstance(v, unicode) and v.startswith("http://") and json.has_key(v):
element[k] = json[v]
for k, v in json.items():
if v.has_key("_typeGroup"):
group = v["_typeGroup"]
if not result.has_key(group):
result[group]=[]
del v["_typeGroup"]
v["__reference"] = k
result[group].append(v)
return result
def print_summary(self):
if not hasattr(self, "doc"):
return None
info = self.doc['info']
print "Calais Request ID: %s" % info['calaisRequestID']
if info.has_key('externalID'):
print "External ID: %s" % info['externalID']
if info.has_key('docTitle'):
print "Title: %s " % info['docTitle']
print "Language: %s" % self.doc['meta']['language']
print "Extractions: "
for k,v in self.simplified_response.items():
print "\t%d %s" % (len(v), k)
def print_entities(self):
if not hasattr(self, "entities"):
return None
for item in self.entities:
print "%s: %s (%.2f)" % (item['_type'], item['name'], item['relevance'])
def print_topics(self):
if not hasattr(self, "topics"):
return None
for topic in self.topics:
print topic['categoryName']
def print_relations(self):
if not hasattr(self, "relations"):
return None
for relation in self.relations:
print relation['_type']
for k,v in relation.items():
if not k.startswith("_"):
if isinstance(v, unicode):
print "\t%s:%s" % (k,v)
elif isinstance(v, dict) and v.has_key('name'):
print "\t%s:%s" % (k, v['name'])
The problem is solved. It was a little complicated because I was using an old version. Thank you.
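For reference, a minimal usage sketch based on the class above (assuming a valid OpenCalais API key; print_summary() and print_entities() are the helpers defined in calais.py itself):
from calais import Calais
API_KEY = 'token'  # your real OpenCalais key
calais = Calais(api_key=API_KEY, submitter="my app")
# analyze() takes raw text; analyze_url() fetches a page and strips the HTML first
result = calais.analyze("George Bush was the 43rd president of the United States.")
result.print_summary()   # request ID, language, extraction counts
result.print_entities()  # one line per entity: type, name, relevance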
Related
I am trying to create software that reads out the definition of every word you type into the text box. Right now it only works if there is one word, and it crashes if there is more than one. How would I go about fixing this?
import wolframalpha
client = wolframalpha.Client('8QR2WG-628657K83Q')
from multiprocessing import Process
import wikipedia
import PySimpleGUI as sg
import cv2
import random
import sys
import threading
import time
import nltk
nltk.download('punkt')
# from oxforddictionaries.words import OxfordDictionaries
# Oxford = OxfordDictionaries('b4170561','f32687e0ecbc219cfd723bb220dad34e')
# o = OxfordDictionaries('b4170561','f32687e0ecbc219cfd723bb220dad34e')
# relax = o.get_synonyms("Apple").json()
# synonyms = relax
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
nltk.download("stopwords")
stop_words = set(stopwords.words("english"))
filtered_list = []
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
nltk.download('averaged_perceptron_tagger')
stemmer = PorterStemmer()
trained_face_data = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
trained_body_data = cv2.CascadeClassifier('haarcascade_upperbody.xml')
trained_eye_data = cv2.CascadeClassifier('haarcascade_eye.xml')
webcam = cv2.VideoCapture(0)
sg.theme('graygraygray')
layout = [ [sg.Text("Enter Test Text")],
[sg.Input()],
[sg.Button('Ok')] ]
window = sg.Window('You', layout)
sg.Popup('About Me','Hello I am an AI devolped by Garrett Provence. I will be using your webcam to scan your suroundings for a quick few seconds and will open a text box where you will be able to ask me questions. By clicking ok below you agree to letting me acess everyhting said before. I am still in beta so please be patient.')
timeout = time.time() + 10;
while True:
##Webcam scanner
def infiniteloop1():
while True:
test = 0
if test == 5 or time.time() > timeout:
break
test = test - 1
successful_frame_read, frame = webcam.read()
grayscaled_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face_coordinates = trained_face_data.detectMultiScale(grayscaled_img)
body_coordinates = trained_body_data.detectMultiScale(grayscaled_img)
eye_coordinates = trained_eye_data.detectMultiScale(grayscaled_img)
for (x,y,w,h) in face_coordinates:
cv2.rectangle(frame, (x, y),(x+w, y+h), (0,random.randrange(255),0), 2)
for (x,y,w,h) in body_coordinates:
cv2.rectangle(frame, (x, y),(x+w, y+h), (0,0,255), 2)
for (x,y,w,h) in eye_coordinates:
cv2.rectangle(frame, (x, y),(x+w, y+h), (random.randrange(255),0,0), 2)
cv2.imshow('FaceThing',frame)
cv2.waitKey(1)
thread1 = threading.Thread(target=infiniteloop1)
thread1.start()
event, values = window.read()
InputText = values[0]
import json
import requests
import os
import pprint
import Oxfordwords
from Oxfordwords import Word
import pprint
##end OF webcam scanner
#img = cv2.imread('Rdj.png')
while True:
##Test Text Scanner --
Text = values[0]
Word.get(Text)
if event == sg.WIN_CLOSED or event == 'Cancel':
break
sys.exit()
try:
words_in_excerpt = word_tokenize(Text)
nltk.pos_tag(words_in_excerpt)
print('Hello',nltk.pos_tag(words_in_excerpt), "")
sg.Popup('Test', nltk.pos_tag(words_in_excerpt))
sg.Popup('Def',Word.definitions())
break
except:
sg.Popup('There seems to be a error processing what you have said')
break
##End of test Text Scanner --
The Oxford dictionary code:
#!/bin/env python3
""" oxford dictionary api """
from http import cookiejar
import requests
from bs4 import BeautifulSoup as soup
class WordNotFound(Exception):
""" word not found in dictionary (404 status code) """
pass
class BlockAll(cookiejar.CookiePolicy):
""" policy to block cookies """
return_ok = set_ok = domain_return_ok = path_return_ok = lambda self, *args, **kwargs: False
netscape = True
rfc2965 = hide_cookie2 = False
class Word(object):
""" retrive word info from oxford dictionary website """
entry_selector = '#entryContent > .entry'
header_selector = '.top-container'
title_selector = header_selector + ' .headword'
wordform_selector = header_selector + ' .pos'
property_global_selector = header_selector + ' .grammar'
br_pronounce_selector = '[geo=br] .phon'
am_pronounce_selector = '[geo=n_am] .phon'
br_pronounce_audio_selector = '[geo=br] [data-src-ogg]'
am_pronounce_audio_selector = '[geo=n_am] [data-src-ogg]'
definition_body_selector = '.senses_multiple'
namespaces_selector = '.senses_multiple > .shcut-g'
examples_selector = '.senses_multiple .sense > .examples .x'
definitions_selector = '.senses_multiple .sense > .def'
extra_examples_selector = '.res-g [title="Extra examples"] .x-gs .x'
phrasal_verbs_selector = '.phrasal_verb_links a'
idioms_selector = '.idioms > .idm-g'
other_results_selector = '#rightcolumn #relatedentries'
soup_data = None
@classmethod
def get_url(cls, word):
""" get url of word definition """
baseurl = 'https://www.oxfordlearnersdictionaries.com/definition/english/'
return baseurl + word
@classmethod
def delete(cls, selector):
""" remove tag with specified selector in cls.soup_data """
try:
for tag in cls.soup_data.select(selector):
tag.decompose()
except IndexError:
pass
@classmethod
def get(cls, word):
""" get html soup of word """
req = requests.Session()
req.cookies.set_policy(BlockAll())
page_html = req.get(cls.get_url(word), timeout=5, headers={'User-agent': 'mother animal'})
if page_html.status_code == 404:
raise WordNotFound
else:
cls.soup_data = soup(page_html.content, 'html.parser')
if cls.soup_data is not None:
# remove some unnecessary tags to prevent false positive results
cls.delete('[title="Oxford Collocations Dictionary"]')
cls.delete('[title="British/American"]') # edge case: 'phone'
cls.delete('[title="Express Yourself"]')
cls.delete('[title="Collocations"]')
cls.delete('[title="Word Origin"]')
@classmethod
def other_results(cls):
""" get similar words, idioms, phrases...
Return: {
'All matches': [
{'word1': word1, 'id1': id1, 'wordform1': wordform1},
{'word2': word2, 'id2': id2, 'wordform2': wordform2}
...
]
'Phrasal verbs': [
{'word1': word1, 'id1': id1, 'wordform1': wordform1},
{'word2': word2, 'id2': id2, 'wordform2': wordform2}
...
]
...
}
"""
info = []
try:
rightcolumn_tags = cls.soup_data.select(cls.other_results_selector)[0]
except IndexError:
return None
# there can be multiple other results table like All matches, Phrasal verbs, Idioms,...
header_tags = rightcolumn_tags.select('dt')
other_results_tags = rightcolumn_tags.select('dd')
# loop each other result table
for header_tag, other_results_tag in zip(header_tags, other_results_tags):
header = header_tag.text
other_results = []
for item_tag in other_results_tag.select('li'):
names = item_tag.select('span')[0].find_all(text=True, recursive=False)
wordform_tag = item_tag.select('pos')
names.append(wordform_tag[0].text if len(wordform_tag) > 0 else '')
other_results.append(names)
other_results = list(filter(None, other_results)) # remove empty list
ids = [cls.extract_id(tag.attrs['href'])
for tag in other_results_tag.select('li a')]
results = []
for other_result, id in zip(other_results, ids):
result = {}
result['name'] = ' '.join(list(map(lambda x: x.strip(), other_result[0:-1])))
result['id'] = id
try:
result['wordform'] = other_result[-1].strip()
except IndexError:
pass
results.append(result)
info.append({header: results})
return info
@classmethod
def name(cls):
""" get word name """
if cls.soup_data is None:
return None
return cls.soup_data.select(cls.title_selector)[0].text
@classmethod
def id(cls):
""" get id of a word. if a word has definitions in 2 seperate pages
(multiple wordform) it will return 'word_1' and 'word_2' depend on
which page it's on """
if cls.soup_data is None:
return None
return cls.soup_data.select(cls.entry_selector)[0].attrs['id']
@classmethod
def wordform(cls):
""" return wordform of word (verb, noun, adj...) """
if cls.soup_data is None:
return None
try:
return cls.soup_data.select(cls.wordform_selector)[0].text
except IndexError:
return None
@classmethod
def property_global(cls):
""" return global property (apply to all definitions) """
if cls.soup_data is None:
return None
try:
return cls.soup_data.select(cls.property_global_selector)[0].text
except IndexError:
return None
@classmethod
def get_prefix_from_filename(cls, filename):
""" get prefix (NAmE or BrE) from audio name when prefix is null """
if '_gb_' in filename:
return 'BrE'
elif '_us_' in filename:
return 'NAmE'
return None
@classmethod
def pronunciations(cls):
""" get britain and america pronunciations """
if cls.soup_data is None:
return None
britain = {'prefix': None, 'ipa': None, 'url': None}
america = {'prefix': None, 'ipa': None, 'url': None}
try:
britain_pron_tag = cls.soup_data.select(cls.br_pronounce_selector)[0]
america_pron_tag = cls.soup_data.select(cls.am_pronounce_selector)[0]
britain['ipa'] = britain_pron_tag.text
britain['prefix'] = 'BrE'
america['ipa'] = america_pron_tag.text
america['prefix'] = 'NAmE'
except IndexError:
pass
try:
britain['url'] = cls.soup_data.select(cls.br_pronounce_audio_selector)[0].attrs['data-src-ogg']
america['url'] = cls.soup_data.select(cls.am_pronounce_audio_selector)[0].attrs['data-src-ogg']
except IndexError:
pass
if britain['prefix'] == None and britain['url'] is not None:
britain['prefix'] = cls.get_prefix_from_filename(britain['url'])
if america['prefix'] == None and america['url'] is not None:
america['prefix'] = cls.get_prefix_from_filename(america['url'])
return [britain, america]
@classmethod
def extract_id(cls, link):
""" get word id from link
Argument: https://abc/definition/id
Return: id
"""
return link.split('/')[-1]
@classmethod
def get_references(cls, tags):
""" get info about references to other page
Argument: soup.select(<selector>)
Return: [{'id': <id>, 'name': <word>}, {'id': <id2>, 'name': <word2>}, ...]
"""
if cls.soup_data is None:
return None
references = []
for tag in tags.select('.xrefs a'): # see also <external link>
id = cls.extract_id(tag.attrs['href'])
word = tag.text
references.append({'id': id, 'name': word})
return references
@classmethod
def references(cls):
""" get global references """
if cls.soup_data is None:
return None
header_tag = cls.soup_data.select(cls.header_selector)[0]
return cls.get_references(header_tag)
@classmethod
def definitions(cls, full=False):
""" Return: list of definitions """
if cls.soup_data is None:
return None
if not full:
return [tag.text for tag in cls.soup_data.select(cls.definitions_selector)]
return cls.definition_full()
@classmethod
def examples(cls):
""" List of all examples (not categorized in seperate definitions) """
if cls.soup_data is None:
return None
return [tag.text for tag in cls.soup_data.select(cls.examples_selector)]
@classmethod
def phrasal_verbs(cls):
""" get phrasal verbs list (verb only) """
if cls.soup_data is None:
return None
phrasal_verbs = []
for tag in cls.soup_data.select(cls.phrasal_verbs_selector):
phrasal_verb = tag.select('.xh')[0].text
id = cls.extract_id(tag.attrs['href']) # https://abc/definition/id -> id
phrasal_verbs.append({'name': phrasal_verb, 'id': id})
return phrasal_verbs
@classmethod
def _parse_definition(cls, parent_tag):
""" return word definition + corresponding examples
A word can have a single (None) or multiple namespaces
Each namespace can have one or many definitions
Each definition can have one, many or no examples
Some words can have specific property
(transitive/intransitive/countable/uncountable/singular/plural...)
A verb can have phrasal verbs
"""
if cls.soup_data is None:
return None
definition = {}
try: # property (countable, transitive, plural,...)
definition['property'] = parent_tag.select('.grammar')[0].text
except IndexError:
pass
try: # label: (old-fashioned), (informal), (saying)...
definition['label'] = parent_tag.select('.labels')[0].text
except IndexError:
pass
try: # refer to something (of people, of thing,...)
definition['refer'] = parent_tag.select('.dis-g')[0].text
except IndexError:
pass
definition['references'] = cls.get_references(parent_tag)
if not definition['references']:
definition.pop('references', None)
try: # sometimes, it just refers to other page without having a definition
definition['description'] = parent_tag.select('.def')[0].text
except IndexError:
pass
definition['examples'] = [example_tag.text
for example_tag in parent_tag.select('.examples .x')]
definition['extra_example'] = [
example_tag.text
for example_tag in parent_tag.select('[unbox=extra_examples] .examples .unx')
]
return definition
@classmethod
def definition_full(cls):
""" return word definition + corresponding examples
A word can have a single (None) or multiple namespaces
Each namespace can have one or many definitions
Each definition can have one, many or no examples
Some words can have specific property
(transitive/intransitive/countable/uncountable/singular/plural...)
A verb can have phrasal verbs
"""
if cls.soup_data is None:
return None
namespace_tags = cls.soup_data.select(cls.namespaces_selector)
info = []
for namespace_tag in namespace_tags:
try:
namespace = namespace_tag.select('h2.shcut')[0].text
except IndexError:
# some words have similar definitions grouped into multiple namespaces ('time')
# some do not, and only have one namespace ('woman')
namespace = None
definitions = []
definition_full_tags = namespace_tag.select('.sense')
for definition_full_tag in definition_full_tags:
definition = cls._parse_definition(definition_full_tag)
definitions.append(definition)
info.append({'namespace': namespace, 'definitions': definitions})
# no namespace: all definitions are global
if len(info) == 0:
info.append({'namespace': '__GLOBAL__', 'definitions': []})
def_body_tags = cls.soup_data.select(cls.definition_body_selector)
definitions = []
definition_full_tags = def_body_tags[0].select('.sense')
for definition_full_tag in definition_full_tags:
definition = cls._parse_definition(definition_full_tag)
definitions.append(definition)
info[0]['definitions'] = definitions
return info
@classmethod
def idioms(cls):
""" get word idioms
Idioms don't have namespaces like regular definitions
Each idiom has one or more definitions
Each definition can have one, many or no examples
"""
idiom_tags = cls.soup_data.select(cls.idioms_selector)
idioms = []
for idiom_tag in idiom_tags:
try:
# sometimes idiom is in multiple idm classes inside
# one idm-l class instead of a single idm class
idiom = idiom_tag.select('.idm-l')[0].text
except IndexError:
idiom = idiom_tag.select('.idm')[0].text
global_definition = {}
try: # label: (old-fashioned), (informal), (saying)...
global_definition['label'] = idiom_tag.select('.labels')[0].text
except IndexError:
pass
try: # refer to something (of people, of thing,...)
global_definition['refer'] = idiom_tag.select('.dis-g')[0].text
except IndexError:
pass
global_definition['references'] = cls.get_references(idiom_tag)
if not global_definition['references']:
global_definition.pop('references', None)
definitions = []
# one idiom can have multiple definitions, each can have multiple examples or no example
for definition_tag in idiom_tag.select('.sense'):
definition = {}
try: # sometimes, it just refers to other page without having a definition
definition['description'] = definition_tag.select('.def')[0].text
except IndexError:
pass
try: # label: (old-fashioned), (informal), (saying)...
definition['label'] = definition_tag.select('.labels')[0].text
except IndexError:
pass
try: # refer to something (of people, of thing,...)
definition['refer'] = definition_tag.select('.dis-g')[0].text
except IndexError:
pass
definition['references'] = cls.get_references(definition_tag)
if not definition['references']:
definition.pop('references', None)
definition['examples'] = [example_tag.text for example_tag in definition_tag.select('.x')]
definitions.append(definition)
idioms.append({'name': idiom, 'summary': global_definition, 'definitions': definitions})
return idioms
@classmethod
def info(cls):
""" return all info about a word """
if cls.soup_data is None:
return None
word = {
'id': cls.id(),
'name': cls.name(),
'wordform': cls.wordform(),
'pronunciations': cls.pronunciations(),
'property': cls.property_global(),
'definitions': cls.definitions(full=True),
'idioms': cls.idioms(),
'other_results': cls.other_results()
}
if not word['property']:
word.pop('property', None)
if not word['other_results']:
word.pop('other_results', None)
if word['wordform'] == 'verb':
word['phrasal_verbs'] = cls.phrasal_verbs()
return word
Any help will be appreciated, thank you :)
Just split values[0] into words and call Word.get(...) on each:
import re
while True:
##Test Text Scanner --
words = re.findall(r"\w+", values[0].strip()) # can also use nltk.word_tokenize
for word in words:
Word.get(word)
if event == sg.WIN_CLOSED or event == 'Cancel':
break
try:
words_in_excerpt = word_tokenize(Text)
nltk.pos_tag(words_in_excerpt)
print('Hello', nltk.pos_tag(words_in_excerpt), "")
sg.Popup('Test', nltk.pos_tag(words_in_excerpt))
sg.Popup('Def', Word.definitions())
break
except:
sg.Popup('There seems to be a error processing what you have said')
break
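Note that Word stores the parsed page in the class attribute soup_data, so each Word.get() call overwrites the previous one, and a single Word.definitions() call after the loop only reflects the last word fetched. A sketch that pops up a definition for every word instead (assuming WordNotFound is also imported from Oxfordwords):
for word in words:
    try:
        Word.get(word)                   # fetch and parse this word's page
        defs = Word.definitions() or []  # definitions for the word just fetched
        sg.Popup('Def: ' + word, '\n'.join(defs))
    except WordNotFound:
        sg.Popup('No definition found for ' + word)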
I have a Python function that uses the TensorFlow library for image recognition, and I deployed it on IBM Cloud Functions (OpenWhisk) through a Dockerfile; I invoke it from the wsk CLI. But when I invoke the function, the following error is displayed:
{
"error": "The action did not return a dictionary."
}
How do I change the output type? Here is the function code:
import boto3
import numpy as np
import os.path
import re
from urllib.request import urlretrieve
import json
from botocore.client import Config
import mimetypes
import os
import requests
import tensorflow as tf  # required by the tf.* calls below; missing from the original imports
SESSION = None
bucket = '---'
def main(event, context):
global bucket
global SESSION
if not os.path.exists('/tmp/imagenet/'):
os.makedirs('/tmp/imagenet/')
if SESSION is None:
downloadFromS3(bucket,'Imagenet/imagenet_2012_challenge_label_map_proto.pbtxt','/tmp/imagenet/imagenet_2012_challenge_label_map_proto.pbtxt')
downloadFromS3(bucket,'Imagenet/imagenet_synset_to_human_label_map.txt','/tmp/imagenet/imagenet_synset_to_human_label_map.txt')
strFile = '/tmp/imagenet/inputimage.png'
if ('queryStringParameters' in event):
if (event['queryStringParameters'] is not None):
if ('url' in event['queryStringParameters']):
urlretrieve(event['queryStringParameters']['url'], strFile)
else:
downloadFromS3(bucket,'Imagenet/inputimage.png',strFile)
else:
downloadFromS3(bucket,'Imagenet/inputimage.png',strFile)
else:
downloadFromS3(bucket,'Imagenet/inputimage.png',strFile)
strResult = run_inference_on_image(strFile)
return {
'statusCode': 200,
'body': json.dumps(strResult)
}
def run_inference_on_image(image):
image_data = tf.gfile.FastGFile(image, 'rb').read()
global SESSION
if SESSION is None:
SESSION = tf.InteractiveSession()
create_graph()
softmax_tensor = tf.get_default_graph().get_tensor_by_name('softmax:0')
predictions = SESSION.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
top_k = predictions.argsort()[-5:][::-1]
node_lookup = NodeLookup()
strResult = '%s (score = %.5f)' % (node_lookup.id_to_string(top_k[0]), predictions[top_k[0]])
vecStr = []
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
vecStr.append('%s (score = %.5f)' % (human_string, score))
return vecStr
def downloadFromS3(bucket,strKey,strFile):
s3_client = boto3.client('s3')
s3_client.download_file(bucket, strKey, strFile)
def getObject(bucket,strKey):
s3_client = boto3.client('s3')
s3_response_object = s3_client.get_object(Bucket=bucket, Key=strKey)
return s3_response_object['Body'].read()
def create_graph():
global bucket
graph_def = tf.GraphDef()
graph_def.ParseFromString(getObject(bucket,'Imagenet/classify_image_graph_def.pb'))
_ = tf.import_graph_def(graph_def, name='')
class NodeLookup(object):
"""Converts integer node ID's to human readable labels."""
def __init__(self,
label_lookup_path=None,
uid_lookup_path=None):
if not label_lookup_path:
label_lookup_path = os.path.join(
'/tmp/imagenet/', 'imagenet_2012_challenge_label_map_proto.pbtxt')
if not uid_lookup_path:
uid_lookup_path = os.path.join(
'/tmp/imagenet/', 'imagenet_synset_to_human_label_map.txt')
self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
def load(self, label_lookup_path, uid_lookup_path):
if not tf.gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not tf.gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.items():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name
def id_to_string(self, node_id):
if node_id not in self.node_lookup:
return ''
return self.node_lookup[node_id]
As the error states, whatever code you have written does not return a dictionary.
From my experience with OpenWhisk to date, for it to understand what it has to display/compute, the output should be in the form of a dict.
So change it to that and you'll be good to go.
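A minimal sketch of the shape OpenWhisk expects, reusing run_inference_on_image() from the question (OpenWhisk Python actions receive a single params dict rather than an event/context pair, and the top-level return value must itself be a JSON-serializable dict):
def main(params):
    # params: the single dict OpenWhisk passes to a Python action
    strResult = run_inference_on_image('/tmp/imagenet/inputimage.png')
    # returning a list or a plain string here triggers 'The action did not return a dictionary.'
    return {'statusCode': 200, 'predictions': strResult}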
The code is given below:
import time
import unittest
import logging as log
from loggenerator import set_log_params,move_latest_log_to_persistent_file
from parse_test_json import get_test_params
from scrapping import SC
class ScrapeTest(unittest.TestCase):
def setUp(self):  # note: unittest only calls the camelCase setUp/tearDown hooks
self.start_time=time.time()
def tearDown(self):
self.end_time=time.time()
def test_SC_scrape(self):
try:
test_name="test scrape"
set_log_params(log_file=test_name,level=log.INFO)
log.info("step1:set all test params")
test_params = get_test_params(self.__class__.__name__, test_name=test_name)
log.info("step 2:set")
ik=SC()
log.info("step3:calling scrapper for")
log.debug(ik.parseURL(party_name=test_params["party_name"],start_date=test_params["start_date"],end_date=test_params["end_date"]))
except Exception as e:
raise e
move_latest_log_to_persistent_file(log_file=test_name)
####
import json, os
from builtins import *
def get_test_params(test_class_name = None, test_name = None):
dir_path = os.path.dirname(os.path.realpath(__file__))
file_path = "/test_param_jsons/" + test_class_name + "params.json"
json_file = dir_path + file_path
with open(json_file) as test_data:
test_json = json.load(test_data)
return test_json[test_class_name][test_name]
This function is raising a KeyError.
This should work as long as you have a JSON file available at <SCRIPT_PATH>/test_param_jsons/MyClass_params.json.
Also, in order to avoid a KeyError, you'll need to ensure that your input JSON file actually contains the test_class_name and test_name keys you look up.
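For example, a params file matching the test in the question would look like this (the inner parameter names are taken from test_SC_scrape; the values are placeholders):
{
    "ScrapeTest": {
        "test scrape": {
            "party_name": "...",
            "start_date": "...",
            "end_date": "..."
        }
    }
}
With a file like that in place, the lookup below returns the inner params dict: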
import json, os
from builtins import *
class MyClass:
def get_test_params (self, test_class_name = None, test_name = None):
with open(os.path.join(os.path.dirname(__file__),"test_param_jsons\\params.json"), 'r') as test_data:
test_json = json.load (test_data)
try:
return test_json[test_class_name][test_name]
except KeyError as e:
print ('KeyError: {}'.format (e))
def mymain(self, test_name):
''' mymain is defined to accommodate your requirement to use __class__ as a parameter '''
test_params = self.get_test_params (self.__class__.__name__, test_name = test_name)
return test_params
if __name__ == '__main__':
test_name = 'sth'
myclass = MyClass ()
result = myclass.mymain (test_name)
print (result)
I am using Blockchain.info's API to send multiple payments. I believe I have everything as it should be; however, when I run the code I get the following error: RuntimeError: ERROR: Invalid Recipients JSON. Please make sure it is url encoded and consult the docs. The docs can be found here: https://blockchain.info/api/blockchain_wallet_api
The Python library I am using can be found here: https://github.com/p4u/blockchain.py/blob/master/blockchain.py
The only other post on this issue was made by the original creator of the library; he said the problem was that the amounts cannot be a decimal, but mine are not. The post can be found here: https://bitcointalk.org/index.php?topic=600870.0
Here is my code:
from __future__ import print_function
from itertools import islice, imap
import csv, requests, json, math
from collections import defaultdict
import requests
import urllib
import json
from os.path import expanduser
import configparser
class Wallet:
guid = 'g'
isAccount = 0
isKey = 0
password1 = 'x'
password2 = 'y'
url = ''
def __init__(self, guid = 'g', password1 = 'x', password2 = 'y'):
if guid.count('-') > 0:
self.isAccount = 1
if password1 == '': # wallet guid's contain -
raise ValueError('No password with guid.')
else:
self.isKey = 1
self.guid = guid
self.url = 'https://blockchain.info/merchant/' + guid + '/'
self.password1 = password1
self.password2 = password2
r = requests.get('http://api.blockcypher.com/v1/btc/main/addrs/A/balance')
balance = r.json()['balance']
with open("Entries#x1.csv") as f,open("winningnumbers.csv") as nums:
nums = set(imap(str.rstrip, nums))
r = csv.reader(f)
results = defaultdict(list)
for row in r:
results[sum(n in nums for n in islice(row, 1, None))].append(row[0])
self.number_matched_0 = results[0]
self.number_matched_1 = results[1]
self.number_matched_2 = results[2]
self.number_matched_3 = results[3]
self.number_matched_4 = results[4]
self.number_matched_5 = results[5]
self.number_matched_5_json = json.dumps(self.number_matched_5, sort_keys = True, indent = 4)
print(self.number_matched_5_json)
if len(self.number_matched_3) == 0:
print('Nobody matched 3 numbers')
else:
self.tx_amount_3 = int((balance*0.001)/ len(self.number_matched_3))
if len(self.number_matched_4) == 0:
print('Nobody matched 4 numbers')
else:
self.tx_amount_4 = int((balance*0.1)/ len(self.number_matched_4))
if len(self.number_matched_5) == 0:
print('Nobody matched 5 numbers')
else:
self.tx_amount_5 = int((balance*0.4)/ len(self.number_matched_5))
self.d = {el: self.tx_amount_5 for el in json.loads(self.number_matched_5_json)}
print(self.d)
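# NOTE: url-encoding the recipients dict here is what later produces the
# 'Invalid Recipients JSON' error -- see the answer below, which uses json.dumps instead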
self.d_url_enc = urllib.urlencode(self.d)
def Call(self, method, data = {}):
if self.password1 != '':
data['password'] = self.password1
if self.password2 != '':
data['second_password'] = self.password2
response = requests.post(self.url + method,params=data)
json = response.json()
if 'error' in json:
raise RuntimeError('ERROR: ' + json['error'])
return json
def SendPayment(self, toaddr, amount, fromaddr = 'A', shared = 0, fee = 0.0001, note = True):
data = {}
data['to'] = toaddr
data['amount'] = self.tx_amount_5
data['fee'] = fee
data['recipients'] = self.d_url_enc
if fromaddr:
data['from'] = fromaddr
if shared:
data['shared'] = 'true'
if note:
data['note'] = 'n'
response = self.Call('payment',data)
def SendManyPayment(self, fromaddr = True, shared = False, fee = 0.0001, note = True):
data = {}
recipients = self.d_url_enc
data['recipients'] = recipients.__str__().replace("'",'"')
data['fee'] = str(fee)
if fromaddr:
data['from'] = 'A'
if shared:
data['shared'] = 'true'
else:
data['shared'] = 'false'
if note:
data['note'] = 'n'
response = self.Call('sendmany',data)
return response
print(Wallet().SendManyPayment())
Complete runtime error: Traceback (most recent call last):
File "D:\Documents\B\Code\A\jsontest.py", line 125, in <module>
print(Wallet().SendManyPayment())
File "D:\Documents\B\Code\A\jsontest.py", line 121, in SendManyPayment
response = self.Call('sendmany',data)
File "D:\Documents\B\Code\A\jsontest.py", line 86, in Call
raise RuntimeError('ERROR: ' + json['error'])
RuntimeError: ERROR: Invalid Recipients JSON. Please make sure it is url encoded and consult the docs.
What does data['recipients'] contain inside of your SendManyPayment() function? It looks like you are trying to do some manual encoding instead of using json.dumps(recipients)
The docs say it should look like this:
{
"1JzSZFs2DQke2B3S4pBxaNaMzzVZaG4Cqh": 100000000,
"12Cf6nCcRtKERh9cQm3Z29c9MWvQuFSxvT": 1500000000,
"1dice6YgEVBf88erBFra9BHf6ZMoyvG88": 200000000
}
Try this out for send many:
def SendManyPayment(self, fromaddr = True, shared = False, fee = 0.0001, note = True):
data = {}
recipients = self.d  # use the plain dict, not the url-encoded string
# recipients should be a json string FIRST!
data['recipients'] = json.dumps(recipients)
data['fee'] = str(fee)
if fromaddr:
data['from'] = 'A'
if shared:
data['shared'] = 'true'
else:
data['shared'] = 'false'
if note:
data['note'] = 'n'
response = self.Call('sendmany',data)
return response
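To see why the original d_url_enc value is rejected, compare the two encodings of the same dict (illustrative Python 2 snippet, using an address from the docs above):
import urllib, json
recipients = {'1JzSZFs2DQke2B3S4pBxaNaMzzVZaG4Cqh': 100000000}
print(urllib.urlencode(recipients))  # 1JzSZFs2DQke2B3S4pBxaNaMzzVZaG4Cqh=100000000  <- form encoding, not JSON
print(json.dumps(recipients))        # {"1JzSZFs2DQke2B3S4pBxaNaMzzVZaG4Cqh": 100000000}  <- what the API parses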
I am getting an unexpected error when using pyPdf. The first section is from a script that I found online, and I am trying to use it to pull a particular section identified in the PDF's outline. Everything works fine, except that right at output.write(outputfile1) it says:
PdfReadError: multiple definitions in dictionary.
Anybody else run into this? Please forgive all the unnecessary prints at the end. :)
import pyPdf
import glob
class Darrell(pyPdf.PdfFileReader):
def getDestinationPageNumbers(self):
def _setup_outline_page_ids(outline, _result=None):
if _result is None:
_result = {}
for obj in outline:
if isinstance(obj, pyPdf.pdf.Destination):
_result[(id(obj), obj.title)] = obj.page.idnum
elif isinstance(obj, list):
_setup_outline_page_ids(obj, _result)
return _result
def _setup_page_id_to_num(pages=None, _result=None, _num_pages=None):
if _result is None:
_result = {}
if pages is None:
_num_pages = []
pages = self.trailer["/Root"].getObject()["/Pages"].getObject()
t = pages["/Type"]
if t == "/Pages":
for page in pages["/Kids"]:
_result[page.idnum] = len(_num_pages)
_setup_page_id_to_num(page.getObject(), _result, _num_pages)
elif t == "/Page":
_num_pages.append(1)
return _result
outline_page_ids = _setup_outline_page_ids(self.getOutlines())
page_id_to_page_numbers = _setup_page_id_to_num()
result = {}
for (_, title), page_idnum in outline_page_ids.iteritems():
result[title] = page_id_to_page_numbers.get(page_idnum, '???')
return result
for fileName in glob.glob("*.pdf"):
output = pyPdf.PdfFileWriter()
print fileName
pdf = Darrell(open(fileName, 'rb'))
template = '%-5s %s'
print template % ('page', 'title')
for p,t in sorted([(v,k) for k,v in pdf.getDestinationPageNumbers().iteritems()]):
print template % (p+1,t)
for p,t in sorted([(v,k) for k,v in pdf.getDestinationPageNumbers().iteritems()]):
if t == "CATEGORY 1":
startpg = p+1
print p+1,'is the first page of Category 1.'
if t == "CATEGORY 2":
endpg = p+1
print p+1,'is the last page of Category 1.'
print startpg, endpg
pagenums = range(startpg,endpg)
print pagenums
for i in pagenums:
output.addPage(pdf.getPage(i))
fileName2 = "%sCategory1_data.pdf" % (str(fileName[:-13]))
print "%s has %s pages." % (fileName2,output.getNumPages())
outputfile1 = file(r"%s" % (fileName2), 'wb')
output.write(outputfile1)
outputfile1.close()
I know it might be too late for you, but for anyone else who stumbles here looking for the answer:
I had the same problem today and fixed it by setting:
export_reader = PdfFileReader(filename, strict=False)
If you are just merging, then use:
merger = PdfFileMerger(strict=False)
This way, you will get only a warning, rather than an exception.
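In the context of the question's loop, that would look something like this (assuming PyPDF2, the maintained successor of pyPdf, which is where the strict flag lives; the Darrell subclass would then inherit from PyPDF2.PdfFileReader):
import PyPDF2
pdf = Darrell(open(fileName, 'rb'), strict=False)  # duplicate dictionary keys become warnings instead of exceptions
output = PyPDF2.PdfFileWriter()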