#! /usr/bin/env python3
from contextlib import redirect_stdout
import io
from lib2to3.pgen2.token import LESS
from locale import format_string
from turtle import back
import requests
import json
import sys
from requests.structures import CaseInsensitiveDict
### ONLY PROVEN FOR ONE OBJECT AT A TIME
#data = open('commandoutputid.json')
#data = json.JSONDecoder(data)
#data = [json.loads(line) for line in open('commandoutputid.json', 'r')]
#data2= json.dumps(data)
### WOULD NOT WORK IF I DIDNT PASS DATA AS A STRING
### EVEN THOUGH WHEN I PASSED AS A GOOD JSON OBJECT INSIDE AN ARRAY. COULDNT DESERIALIZE
# Load the JSON payload once. The original serialized it back to a string
# with json.dumps() and immediately re-parsed it with json.loads() — a no-op
# round-trip that has been removed.
with open('commandoutputid.json', 'r') as json_file:
    json_dict = json.load(json_file)

url = 'https://...'
headers = CaseInsensitiveDict()
# Bug fix: "plain/text" is not a valid MIME type; the correct one is "text/plain".
headers["Accept"] = "text/plain"
headers["Authorization"] = "Bearer Token"

# NOTE(review): verify=False disables TLS certificate validation — confirm this
# is intentional (e.g. an internal endpoint with a self-signed certificate).
response = requests.post(url, json=json_dict, headers=headers, verify=False)

# Persist the raw response body, then echo both it and the payload we sent.
with open("backup_output.txt", "wb") as f:
    f.write(response.content)
print(response.content)
print(json_dict)
The above script works for passing a single JSON object via a POST request and returning the desired output.
My goal is to iterate through hundreds, if not thousands, of JSON objects, sending one POST request per JSON object in "commandoutputid.json" — instead of the single POST request above.
So you're dealing with an NDJSON (Newline Delimited JSON) instead of a JSON.
http://ndjson.org/
Of course python has a library for it → https://pypi.org/project/ndjson/
Below is a simple load/loop to show how to deal with it.
main.py
import ndjson
from pprint import pprint

# Parse the newline-delimited JSON file into a list of dicts (one per line).
with open('commandoutputid.ndjson', 'r') as infile:
    records = ndjson.load(infile)

# Show that each parsed element is an ordinary dict, then pretty-print it.
for record in records:
    print(type(record))
    pprint(record, indent=4)
requirements.txt
ndjson
commandoutputid.ndjson
{"deviceId":1077,"aggregationId":"1aba3c13-5891-4c75-8648-4fe6b954e1f5","commandText":"show running-config","logicalSystemName":"<none>","seenAt":"2022-11-15T06:00:01.685243+00:00"}
{"deviceId":1082,"aggregationId":"1aba3c13-5891-4c75-8648-4fe6b954e1f5","commandText":"show running-config","logicalSystemName":"<none>","seenAt":"2022-11-15T06:00:01.705206+00:00"}
{"deviceId":763,"aggregationId":"1aba3c13-5891-4c75-8648-4fe6b954e1f5","commandText":"show running-config","logicalSystemName":"<none>","seenAt":"2022-11-15T06:00:01.698229+00:00"}
output
<class 'dict'>
{ 'aggregationId': '1aba3c13-5891-4c75-8648-4fe6b954e1f5',
'commandText': 'show running-config',
'deviceId': 1077,
'logicalSystemName': '<none>',
'seenAt': '2022-11-15T06:00:01.685243+00:00'}
<class 'dict'>
{ 'aggregationId': '1aba3c13-5891-4c75-8648-4fe6b954e1f5',
'commandText': 'show running-config',
'deviceId': 1082,
'logicalSystemName': '<none>',
'seenAt': '2022-11-15T06:00:01.705206+00:00'}
<class 'dict'>
{ 'aggregationId': '1aba3c13-5891-4c75-8648-4fe6b954e1f5',
'commandText': 'show running-config',
'deviceId': 763,
'logicalSystemName': '<none>',
Related
I am new to Python. I am working with some Python code that calls an API and gets a response in CSV format. I would like to know how I can save that CSV response to a CSV file.
#! /usr/bin/env python
import httplib2
# These aren't needed, just for this example
from pprint import pformat
from datetime import datetime
import pytz
from pvlive_api import PVLive
import pandas as pd
import json
def post_elexon(url):
    """Issue an HTTP request to *url* and return the (response, content) pair.

    NOTE(review): despite the name, this sends a GET, not a POST.
    """
    client = httplib2.Http()
    return client.request(
        uri=url,
        method='GET',
        headers={'Content-Type': 'application/xml; charset=UTF-8'},
    )
def main():
    """Fetch the Elexon B1770 report and print the response and its body."""
    resp, content = post_elexon(
        url='https://api.bmreports.com/BMRS/B1770/v1?APIKey=MY_API_KEY&SettlementDate=2015-03-01&Period=1&ServiceType=csv',
    )
    print("===Response===")
    print(resp)
    print("===Content===")
    print(pformat(content))
    print("===Finished===")


if __name__ == "__main__":
    main()
Any help, advice would be greatly appreciated.
Thank you
Try this:
import csv

# Write the HTTP response body to a CSV file, one row per response line.
# newline='' stops csv.writer from emitting blank rows on Windows.
with open('out.csv', 'w', newline='') as f:
    # Bug fix: the writer must wrap the output file handle `f`,
    # not the response object `resp`.
    writer = csv.writer(f)
    for line in resp.iter_lines():
        # NOTE(review): naive split(',') breaks on quoted fields containing
        # commas — acceptable only if the feed never quotes values.
        writer.writerow(line.decode('utf-8').split(','))
Edit:
I tested your request - it returns a json.
so you can save it as json:
with open('response.json', 'w') as f:
    # Bug fix: `resp` is a response object and is not JSON-serializable;
    # dump its parsed JSON body instead.
    # NOTE(review): assumes `resp` is a requests.Response — confirm against caller.
    json.dump(resp.json(), f)
This is my code:
from os import rename, write
import requests
import json
url = "https://api.github.com/search/users?q=%7Bquery%7D%7B&page,per_page,sort,order%7D"
data = requests.get(url).json()
print(data)

# Bug fix: use a context manager so the input file is always closed
# (the original leaked the handle opened with open(..., "r")).
with open("C:/Users/vladi/Desktop/json files Vlad/file structure first attemp.json", "r") as infile:
    json_object = json.load(infile)

# Bug fix: open() needs a filename string — `data` is a dict, so open(data, 'w')
# raised TypeError; likewise a dict cannot be passed to write(), so the parsed
# object is serialized with json.dump instead.
with open("C:/Users/vladi/Desktop/json files Vlad/output.json", "w") as endfile:
    json.dump(json_object, endfile)
print(endfile)
I want to call API request.
I want to take data from this URL: https://api.github.com/search/users?q=%7Bquery%7D%7B&page,per_page,sort,order%7D,
and rewrite it with my own data which is my file called file structure first attemp.json
and update this URL with my own data.
import requests

url = "https://api.github.com/search/usersq=%7Bquery%7D%7B&page,per_page,sort,order%7D"
data = requests.get(url)

# Bug fix: open() takes a filename string, not the Response object itself.
# Write the response body text straight to disk — it is already JSON,
# so no json round-trip is needed.
with open("output.json", "w") as endfile:
    endfile.write(data.text)
json.loads() returns a Python dictionary, which cannot be passed directly to a file's write() method. Simply write the string returned from the URL instead.
response.json() is a built in feature that requests uses to load the JSON returned from the URL. So you are loading the JSON twice.
I am trying to download data from the iex api using python and currently I am to the point where i get the data, but now i want to format it.
basically i get a lot of data which i do not care about, I just want to have the "float" section.
The data should look like this:
Ticker, Float,
AAPL, 4700000000, (something like that)
The code I am using:
import requests
# Fetch the IEX "key stats" payload for AAPL and print the decoded JSON.
# Only the "float" field is actually of interest downstream.
url = "https://api.iextrading.com/1.0/stock/aapl/stats"
response = requests.get(url).json()
print (response)
I would be very happy if someone could explain to me how to do this.
Kind regards
Right now I have the code:
import requests
import json

# Bug fix: the original fused two statements on one line
# (`import json filename='resp.json'`), which is a syntax error — twice.
# The duplicated aapl/tsla request code is also generalized into one loop;
# adding a ticker now means adding one string to the tuple.
tickers = ("aapl", "tsla")
filename = 'resp.json'
with open(filename, 'a+') as outfile:
    for ticker in tickers:
        url = "https://api.iextrading.com/1.0/stock/%s/stats" % ticker
        response = requests.get(url).json()
        # (symbol, float) pair, appended to the file as before.
        data = (response['symbol'], response['float'])
        # NOTE(review): appending multiple JSON documents to one file does not
        # produce a single valid JSON document — confirm the consumer expects this.
        json.dump(data, outfile, indent=4)
I would like the data to show as:
Ticker, Float,
AAPL, 4700000000,
TSLA, 1700000000,
(Ticker and Float do not necessarily have to be placed above; I could do that myself in Excel Power Query anyway.)
You can just treat it like a dictionary. response['float'] would give you the float. Similarly for any key.
import requests
# Query IEX key stats for AAPL; the decoded JSON behaves like a plain dict.
url = "https://api.iextrading.com/1.0/stock/aapl/stats"
response = requests.get(url).json()
# Individual fields are accessed by key, exactly as with any dictionary.
print (response['float'])
print(response['symbol'])
Output
4705473314
AAPL
Your code is doing exactly what it should do, if you want a certain part of the json, just access it.
import requests

# Fetch the key-stats dict for AAPL and pick out individual fields by key.
url = "https://api.iextrading.com/1.0/stock/aapl/stats"
response = requests.get(url).json()
# Bug fix: the expected-output lines (">4705473314", ">'AAPL'") were pasted
# into the code as statements, making the snippet a syntax error; they are
# preserved here as comments instead.
print(response['float'])    # -> 4705473314
print(response['symbol'])   # -> 'AAPL'
print(response['symbol'], response['float'])
to store response in a json file, we can do something like this
import json
# Persist the previously fetched `response` dict as pretty-printed JSON.
filename='resp.json'
with open(filename, 'w') as outfile:
    json.dump(response, outfile, indent=4)
#!/usr/bin/python
import os
import json
import urllib
import urllib2
url = "https://www.example.com"
parameters = {'resource': 'aaaa',
'apikey': '1111'}
data = urllib.urlencode(parameters)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
json_data = response.read()
with open("test.json") as json_file:
json_file.write(json_data)
print json_data
I don't use the json module anymore — it was left over from an earlier version, before json_data was introduced.
As Martijn Pieters pointed out, the data is already encoded so you shouldn't need the json module at all in this case
You can just write the output to a file
# The body is already a JSON string, so it can be written to disk verbatim —
# no json module round-trip is needed.
json_data = response.read()
with open("test.json" , "w") as json_file:
    json_file.write(json_data)
Since you already have json,
with open("test.json", "w") as f:
f.write(data)
Another thing to note here, you should not have a variable named json, because
import json
json = 'some string'
json.dumps("{'a':1}")
>> AttributeError: 'str' object has no attribute 'dumps'
if you want a variable named json you can use import json as j
I'm trying to load JSON data in Pandas in order to do some analysis.
Here is an example of the data I'm analyzing.
http://data.ncaa.com/jsonp/game/football/fbs/2013/08/31/wyoming-nebraska/field.json
I have tried the following:
import json
import pandas as pd
from pandas import DataFrame
# Attempt 1: let pandas parse the file directly.
# NOTE(review): this raises ValueError because the file is JSONP (a function
# call wrapping JSON), not plain JSON — see the accepted answer below.
json_data = pd.read_json('jsonv3.json')
and also
import json
import pandas
from pprint import pprint

# Attempt 2: parse the file with the stdlib json module.
# A context manager guarantees the handle is closed even when json.load
# raises — the original closed it manually, which was skipped on error.
with open('jsonv3.json') as json_data:
    data = json.load(json_data)
pprint(data)
The resulting errors are as follows:
1) ValueError: Expected object or value
2) ValueError: No JSON object could be decoded
I don't really know why the JSON file is not being recognized.
I've confirmed on http://jsonformatter.curiousconcept.com/ that it is valid JSON. I don't really know how to debug the issue, and I haven't been able to find anything. Could the error be caused by the JSON spacing format?
That's not JSON, it is JSONP. Note that the JSON "content" is wrapped in a "function call" callbackWrapper(...). From the wikipedia article: "The response to a JSONP request is not JSON and is not parsed as JSON".
If you've saved the JSONP response in the file jsonv3.json, you could strip off the function call wrapper and process the content with something like this:
import json


def extract_jsonp_payload(text):
    """Return the JSON text enclosed in a JSONP callback wrapper.

    A JSONP response looks like ``callbackWrapper({...})``; the payload is
    whatever sits between the first '(' and the last ')'.
    """
    start = text.find('(')
    end = text.rfind(')')
    return text[start + 1:end]


if __name__ == "__main__":
    # Side-effecting file read moved under the main guard so the helper
    # above can be imported and reused without touching the filesystem.
    with open('jsonv3.json', 'r') as f:
        response = f.read()
    data = json.loads(extract_jsonp_payload(response))