Error getting JSON using oauthlib in Python

I'm trying to get pretty-printed JSON but I always get the same error, "expected string or buffer". My code is below:
import urllib2
import json
import logging
from libs.requests_oauthlib import OAuth1Session
import libs.requests2

coke = OAuth1Session('user_key',
                     client_secret='user_secret',
                     resource_owner_key='key',
                     resource_owner_secret='key_secret')

headers = {'content-type': 'application/json'}
url = "http://test.hdp.pengostores.mx/api/rest/orders"
response = coke.get(url, headers=headers)
self.response.out.write(response.text)
My log:
<type 'exceptions.Exception'> (/base/data/home/apps/s~precise-line-76299minutos/devvic.398776853921596377/delivery/handlers.py:5278)
Traceback (most recent call last):
File "/base/data/home/apps/s~precise-line-76299minutos/devvic.398776853921596377/delivery/handlers.py", line 5274, in get
response = json.loads(coke.get(url,headers=headers))
File "/base/data/home/runtimes/python27/python27_dist/lib/python2.7/json/__init__.py", line 338, in loads
return _default_decoder.decode(s)
File "/base/data/home/runtimes/python27/python27_dist/lib/python2.7/json/decoder.py", line 365, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
TypeError: expected string or buffer

In the error message you use
json.loads(coke.get(...))
but get() returns a Response object, so you have to use get().text or get().content (instead of get()) to obtain text or bytes.
Or try
data = coke.get(...).json()
to get the JSON converted directly to Python data.
EDIT: to pretty-print Python data, use pprint.pprint(data), or convert the Python data to a JSON string using json.dumps().
See the example from the docs: https://docs.python.org/2/library/json.html
Pretty printing:
>>> import json
>>> print json.dumps({'4': 5, '6': 7}, sort_keys=True,
... indent=4, separators=(',', ': '))
{
    "4": 5,
    "6": 7
}
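Putting both pieces together, a minimal sketch of the corrected code (reusing the placeholder credentials and URL from the question):

import json
from libs.requests_oauthlib import OAuth1Session

# placeholder credentials copied from the question
coke = OAuth1Session('user_key',
                     client_secret='user_secret',
                     resource_owner_key='key',
                     resource_owner_secret='key_secret')
response = coke.get('http://test.hdp.pengostores.mx/api/rest/orders',
                    headers={'content-type': 'application/json'})

data = response.json()  # parse the response body into Python data
# serialize back to an indented JSON string for pretty printing
print json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))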

Related

How to properly write a python function that reads keys from a separate json file

I am working on a Python script that will read JSON and look for a specific key. So far, I can get it to work if I include my JSON values as a variable. However, my end goal is to read the JSON values from a file. Here is the code that I've been working on:
import jwt
import os
jwks = {
    "keys": [
        {
            "kty": "RSA",
            "use": "sig",
            "kid": "123456ABCDEF",
            "x5t": "123456ABCDEF",
            "n": "qa9f6h6h52XbX0iAgxKgEDlRpbJw",
            "e": "AQAB",
            "x5c": [
                "43aw7PQjxt4/MpfNMS2BfZ5F8GVSVG7qNb352cLLeJg5rc398Z"
            ]
        },
        {
            "kty": "RSA",
            "use": "sig",
            "kid": "987654ghijklmnoP",
            "x5t": "987654ghijklmnoP",
            "n": "qa9f6h6h52XbX0iAgxKgEDlRpbJw",
            "e": "AQAB",
            "x5c": [
                "1234R46Qjxt4/MpfNMS2BfZ5F8GVSVG7qNb352cLLeJg5rc398Z"
            ]
        }
    ]
}
class InvalidAuthorizationToken(Exception):
    def __init__(self, details):
        super().__init__('Invalid authorization token: ' + details)

def get_jwk(kid):
    for jwk in jwks.get('keys'):
        if jwk.get('kid') == kid:
            print ('This is jwk:', jwk)
            return jwk
    raise InvalidAuthorizationToken('kid not recognized')

#Execute
get_jwk('123456ABCDEF')
get_jwk('987654ghijklmnoP')
Here, what I am trying to do is replace those same values, store them in a separate file (jwks-keys), and read them in as a variable. However, I'm getting the following error and don't understand what I've done wrong. How do I properly construct this function?
Here's the trace:
Traceback (most recent call last):
File "printjwks2.py", line 59, in <module>
get_jwk('123456ABCDEF')
File "printjwks2.py", line 51, in get_jwk
for jwk in jwks.get('keys'):
AttributeError: 'str' object has no attribute 'get'
Here's the function:
def get_jwk(kid):
    with open('testkeys/jwks-keys', 'r') as az:
        jwks = az.read()
    for jwk in jwks.get('keys'):
        if jwk.get('kid') == kid:
            print (jwk)
            return jwk
    raise InvalidAuthorizationToken('kid not recognized')
with open('testkeys/jwks-keys', 'r') as az:
    jwks = az.read()
Here jwks is just a string object, because az.read() returns the file contents as a string. So you have to deserialize the JSON data into Python objects before calling jwks.get('keys').
>>> sample_json = '{"json_key": "json_value"}'
>>> sample_json.get("json_key")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'str' object has no attribute 'get'
>>> import json
>>> json.loads(sample_json).get("json_key")
'json_value'
Since you are reading JSON from a file, you can use json.load to deserialize fp (a .read()-supporting text file or binary file containing a JSON document) to a Python object:
>>> import json
>>> with open('testkeys/jwks-keys', 'r') as az:
... jwks = json.load(az)
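Putting it together, a minimal sketch of the corrected function (assuming testkeys/jwks-keys holds the same JSON shown in the question, and InvalidAuthorizationToken is defined as above):

import json

def get_jwk(kid):
    with open('testkeys/jwks-keys', 'r') as az:
        jwks = json.load(az)  # deserialize the whole file into a dict
    for jwk in jwks.get('keys'):
        if jwk.get('kid') == kid:
            return jwk
    raise InvalidAuthorizationToken('kid not recognized')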

UnicodeDecodeError for md5 id bulk importing data into elasticsearch

I have written a simple Python script to import data into Elasticsearch using the bulk API.
# -*- encoding: utf-8 -*-
import csv
import datetime
import hashlib

from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from dateutil.relativedelta import relativedelta

ORIGINAL_FORMAT = '%y-%m-%d %H:%M:%S'
INDEX_PREFIX = 'my-log'
INDEX_DATE_FORMAT = '%Y-%m-%d'
FILE_ADDR = '/media/zeinab/ZiZi/Elastic/python/elastic-test/elasticsearch-import-data/sample_data/sample.csv'

def set_data(input_file):
    with open(input_file) as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            sendtime = datetime.datetime.strptime(row['sendTime'].split('.')[0], ORIGINAL_FORMAT)
            yield {
                "_index": '{0}-{1}_{2}'.format(
                    INDEX_PREFIX,
                    sendtime.replace(day=1).strftime(INDEX_DATE_FORMAT),
                    (sendtime.replace(day=1) + relativedelta(months=1)).strftime(INDEX_DATE_FORMAT)),
                "_type": 'data',
                '_id': hashlib.md5("{0}{1}{2}{3}{4}".format(sendtime, row['IMSI'], row['MSISDN'], int(row['ruleRef']), int(row['sponsorRef']))).digest(),
                "_source": {
                    'body': {
                        'status': int(row['status']),
                        'sendTime': sendtime
                    }
                }
            }

if __name__ == "__main__":
    es = Elasticsearch(['http://{0}:{1}'.format('my.host.ip.addr', 9200)])
    es.indices.delete(index='*')
    success, _ = bulk(es, set_data(FILE_ADDR))
This comment helped me with writing/using the set_data method.
Unfortunately I get this exception:
/usr/bin/python2.7 /media/zeinab/ZiZi/Elastic/python/elastic-test/elasticsearch-import-data/import_bulk_data.py
Traceback (most recent call last):
File "/media/zeinab/ZiZi/Elastic/python/elastic-test/elasticsearch-import-data/import_bulk_data.py", line 59, in <module>
success, _ = bulk(es, set_data(source_file))
File "/usr/local/lib/python2.7/dist-packages/elasticsearch/helpers/__init__.py", line 257, in bulk
for ok, item in streaming_bulk(client, actions, **kwargs):
File "/usr/local/lib/python2.7/dist-packages/elasticsearch/helpers/__init__.py", line 180, in streaming_bulk
client.transport.serializer):
File "/usr/local/lib/python2.7/dist-packages/elasticsearch/helpers/__init__.py", line 60, in _chunk_actions
action = serializer.dumps(action)
File "/usr/local/lib/python2.7/dist-packages/elasticsearch/serializer.py", line 50, in dumps
raise SerializationError(data, e)
elasticsearch.exceptions.SerializationError: ({u'index': {u'_type': 'data', u'_id': '8\x1dI\xa2\xe9\xa2H-\xa6\x0f\xbd=\xa7CY\xa3', u'_index': 'my-log-2017-04-01_2017-05-01'}}, UnicodeDecodeError('utf8', '8\x1dI\xa2\xe9\xa2H-\xa6\x0f\xbd=\xa7CY\xa3', 3, 4, 'invalid start byte'))
Process finished with exit code 1
I can insert this data into Elasticsearch successfully using the index API:
es.index(
    index='{0}-{1}_{2}'.format(
        INDEX_PREFIX,
        sendtime.replace(day=1).strftime(INDEX_DATE_FORMAT),
        (sendtime.replace(day=1) + relativedelta(months=1)).strftime(INDEX_DATE_FORMAT)
    ),
    doc_type='data',
    id=hashlib.md5("{0}{1}{2}{3}{4}".format(sendtime, row['IMSI'], row['MSISDN'], int(row['ruleRef']), int(row['sponsorRef']))).digest(),
    body={
        'status': int(row['status']),
        'sendTime': sendtime
    }
)
But the issue with the index API is that it's very slow; it needs about 2 seconds to import just 50 records. I hoped the bulk API would help me with the speed.
According to the hashlib documentation, the digest method will
Return the digest of the data passed to the update() method so far. This is a bytes object of size digest_size which may contain bytes in the whole range from 0 to 255.
So the resulting bytes may not be decodable to unicode.
>>> id_ = hashlib.md5('abc'.encode('utf-8')).digest()
>>> id_
b'\x90\x01P\x98<\xd2O\xb0\xd6\x96?}(\xe1\x7fr'
>>> id_.decode('utf-8')
Traceback (most recent call last):
File "<console>", line 1, in <module>
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x90 in position 0: invalid start byte
The hexdigest method will produce a string as output; from the docs:
Like digest() except the digest is returned as a string object of double length, containing only hexadecimal digits. This may be used to exchange the value safely in email or other non-binary environments.
>>> id_ = hashlib.md5('abc'.encode('utf-8')).hexdigest()
>>> id_
'900150983cd24fb0d6963f7d28e17f72'
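So switching digest() to hexdigest() in the question's '_id' line should give the serializer a plain ASCII string. A minimal sketch with hypothetical stand-in values for the row fields:

import hashlib

# hypothetical stand-ins for sendtime, IMSI, MSISDN, ruleRef, sponsorRef
key = "{0}{1}{2}{3}{4}".format('2017-04-01 10:00:00', '1234', '5678', 1, 2)
print repr(hashlib.md5(key).digest())  # raw bytes; may not be valid UTF-8
print hashlib.md5(key).hexdigest()     # 32 hex characters; always JSON-safe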

Python: Unable to convert JSON file to CSV [duplicate]

This question already has an answer here:
Python TypeError: expected string or buffer
(1 answer)
Closed 5 years ago.
I have the code below, which should convert a JSON file to a CSV file:
import json
import csv

infractions = open("C:\\Users\\Alan\\Downloads\\open.json","r")
infractions_parsed = json.loads(infractions)
infractions_data = infractions_parsed['infractions']

# open a file for writing
csv_data = open('Data.csv', 'w')

# create the csv writer object
csvwriter = csv.writer(csv_data)

count = 0
for inf in infractions_data:
    if count == 0:
        header = inf.keys()
        csvwriter.writerow(header)
        count += 1
    csvwriter.writerow(inf.values())
employ_data.close()
However, I get this error. Any reason why this should be?
C:\Users\Alan\Desktop>python monkeytennis.py
Traceback (most recent call last):
File "monkeytennis.py", line 5, in <module>
infractions_parsed = json.loads(infractions)
File "C:\Python27\lib\json\__init__.py", line 339, in loads
return _default_decoder.decode(s)
File "C:\Python27\lib\json\decoder.py", line 364, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
TypeError: expected string or buffer
The JSON is in this format:
{
    "count": 666,
    "query": "righthere",
    "infractions": [{
        "status": "open",
        "severity": 2.0,
        "title": "Blah blah blah",
        "coals": [1, 1],
        "date": "2017-04-22T23:10:07",
        "name": "Joe Bloggs"
    },...
infractions is a file object, which can't be passed directly to json.loads(). Either read it first:
infractions_parsed = json.loads(infractions.read())
or use json.load (without the 's'), which accepts a file-like object.
infractions_parsed = json.load(infractions)
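For reference, a minimal corrected sketch of the whole script using json.load and with blocks (this also sidesteps the unrelated employ_data.close() NameError at the end of the original, which closes a handle that was never opened):

import json
import csv

with open("C:\\Users\\Alan\\Downloads\\open.json", "r") as infractions:
    infractions_parsed = json.load(infractions)  # load() accepts the file object

infractions_data = infractions_parsed['infractions']

with open('Data.csv', 'w') as csv_data:
    csvwriter = csv.writer(csv_data)
    for count, inf in enumerate(infractions_data):
        if count == 0:
            csvwriter.writerow(inf.keys())  # header row from the first record
        csvwriter.writerow(inf.values())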

Can't access mqlread API

I am trying to access the mqlread API from Freebase but am getting a "Not Found" 404:
api_key = open("freebaseApiKey").read()
mqlread_url = 'https://www.googleapis.com/freebase/v1/mqlread'
mql_query = '[{"mid": null,"name": null, "type": "/location/statistical_region","limit": 100}]'
cursor = ""
topicService_url = 'https://www.googleapis.com/freebase/v1/topic'
params = {
    'key': api_key,
    'filter': '/location/statistical_region',
    'limit': 0
}

for i in xrange(1000):
    mql_url = mqlread_url + '?query=' + mql_query + "&cursor=" + cursor
    print mql_url
    statisticalRegionsResult = json.loads(urllib.urlopen(mql_url).read())
    ....
When I run my Python file, I get:
https://www.googleapis.com/freebase/v1/mqlread?query=[{"mid": null,"name": null, "type": "/location/statistical_region","limit": 100}]&cursor=
Traceback (most recent call last):
File "[Filepath]...FreeBaseDownload.py", line 37, in <module>
statisticalRegionsResult = json.loads(urllib.urlopen(mql_url).read())
File "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/json/__init__.py", line 307, in loads
return _default_decoder.decode(s)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/json/decoder.py", line 319, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/json/decoder.py", line 338, in raw_decode
raise ValueError("No JSON object could be decoded")
ValueError: No JSON object could be decoded
What am I doing wrong with the API? I've read things about mqlread being deprecated; what is the equivalent now for my goal of getting all statistical regions (the mids) in Freebase?
It was deprecated over a year ago, and it was finally shut down on May 2.
https://groups.google.com/forum/#!topic/freebase-discuss/WEnyO8f7xOQ
The only source for this information now is the Freebase data dump.
https://developers.google.com/freebase/#freebase-rdf-dumps
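As a starting point with the dump, here is a rough sketch, assuming the dump is the gzipped, tab-separated N-Triples file described at that link (the local filename is hypothetical):

import gzip

TYPE_PRED = '<http://rdf.freebase.com/ns/type.object.type>'
TARGET = '<http://rdf.freebase.com/ns/location.statistical_region>'

with gzip.open('freebase-rdf-latest.gz') as dump:  # hypothetical local copy
    for line in dump:
        fields = line.split('\t')
        if len(fields) >= 3 and fields[1] == TYPE_PRED and fields[2] == TARGET:
            # subjects look like <http://rdf.freebase.com/ns/m.0xxxx>;
            # turn that back into a mid of the form /m/0xxxx
            print '/' + fields[0].strip('<>').rsplit('/', 1)[-1].replace('.', '/', 1)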

No JSON object could be decoded after retrieving JSON content using POST without Gzip encoding

Using Python 2.7.8, we get "ValueError: No JSON object could be decoded" when running this script:
from urllib2 import urlopen, Request
from json import dumps, loads, load
values = dumps({
    "issueId": 10600,
    "versionId": "10000",
    "cycleId": "16",
    "projectId": 10000
})
headers = {"Content-Type": "application/json"}
request = Request("http://private-anon-491d363a1-getzephyr.apiary-mock.com/jira_server/rest/zapi/latest/execution", data=values, headers=headers)
print request.get_method()
print request.get_header("Content-Encoding")
response = urlopen(request)
print "\n" + response.read()
print response.getcode()
print response.geturl()
print response.info()
json = loads(response.read()) # raises ValueError("No JSON object could be decoded")
Output is:
POST
None
{
    "32": {
        "id": 32,
        "executionStatus": "-1",
        "comment": "",
        "htmlComment": "",
        "cycleId": 16,
        "cycleName": "Audit Test Cycle 3",
        "versionId": 10000,
        "versionName": "v1",
        "projectId": 10000,
        "issueId": 10600,
        "issueKey": "ZFJ-19",
        "summary": "test - check1",
        "label": "",
        "component": ""
    }
}
200
http://private-anon-491d363a1-getzephyr.apiary-mock.com/jira_server/rest/zapi/latest/execution
Server: Cowboy
X-Apiary-Ratelimit-Limit: 120
X-Apiary-Ratelimit-Remaining: 119
Content-Type: application/json
Access-Control-Allow-Origin: *
Access-Control-Allow-Methods: OPTIONS,GET,HEAD,POST,PUT,DELETE,TRACE,CONNECT
Access-Control-Max-Age: 10
X-Apiary-Transaction-Id: 55423c1c7996e10300e32acc
Date: Thu, 30 Apr 2015 14:28:45 GMT
X-Cache: MISS from p-proxy.int.hrs.com
X-Cache-Lookup: MISS from p-proxy.int.hrs.com:3128
Via: 1.1 vegur, 1.0 p-proxy.int.hrs.com:3128 (squid/2.6.STABLE6)
Proxy-Connection: close
Traceback (most recent call last):
File "test2.py", line 20, in <module>
json = loads(response.read()) # raises ValueError("No JSON object could be decoded")
File "C:\Program Files (x86)\python27\lib\json\__init__.py", line 338, in loads
return _default_decoder.decode(s)
File "C:\Program Files (x86)\python27\lib\json\decoder.py", line 366, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "C:\Program Files (x86)\python27\lib\json\decoder.py", line 384, in raw_decode
raise ValueError("No JSON object could be decoded")
ValueError: No JSON object could be decoded
I read a lot of Q&A involving aspects like Gzip and the requests lib, but it was not helpful. How can I decode the response using the urllib2 and json libs? If I copy the response into a file and parse the file, it works, so it is valid JSON.
You cannot .read() a file-like object (which, according to the docs, is what urlopen returns) twice. I modified your code; try it:
from urllib2 import urlopen, Request
from json import dumps, loads, load
values = dumps({
    "issueId": 10600,
    "versionId": "10000",
    "cycleId": "16",
    "projectId": 10000
})
headers = {"Content-Type": "application/json"}
request = Request("http://private-anon-491d363a1-getzephyr.apiary-mock.com/jira_server/rest/zapi/latest/execution", data=values, headers=headers)
print request.get_method()
print request.get_header("Content-Encoding")
response = urlopen(request)
text = response.read() #storing the data
print "\n" + text #note the usage of the stored string
print response.getcode()
print response.geturl()
print response.info()
json = loads(text) #note the usage of the stored string
You should store the contents in a variable and decode that variable. The second call to response.read() will not get any data.
Simple solution:
content = response.read()
json = loads(content)
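A quick way to see why the second read() returns nothing, sketched with an in-memory file-like object:

from StringIO import StringIO

buf = StringIO('{"key": "value"}')
print repr(buf.read())  # first read returns the whole string
print repr(buf.read())  # second read returns '' because the stream is exhausted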
