#!/usr/bin/python
import os
import json
import urllib
import urllib2
url = "https://www.example.com"
parameters = {'resource': 'aaaa',
'apikey': '1111'}
data = urllib.urlencode(parameters)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
json_data = response.read()
with open("test.json") as json_file:
json_file.write(json_data)
print json_data
I don't see where I use json again; it was left over from before json_data was introduced.
As Martijn Pieters pointed out, the data is already encoded so you shouldn't need the json module at all in this case
You can just write the output to a file
# Read the response body once and persist it verbatim to test.json.
json_data = response.read()
with open("test.json", "w") as out_file:
    out_file.write(json_data)
Since you already have json,
with open("test.json", "w") as f:
f.write(data)
Another thing to note here, you should not have a variable named json, because
import json
# Rebinding the name `json` to a string shadows the imported module in this
# namespace — the module object is no longer reachable by that name.
json = 'some string'
# This now looks up .dumps on a str instance, raising AttributeError
# (shown below). This is a deliberate demonstration of the pitfall.
json.dumps("{'a':1}")
>> AttributeError: 'str' object has no attribute 'dumps'
if you want a variable named json you can use import json as j
Related
#! /usr/bin/env python3
from contextlib import redirect_stdout
import io
from lib2to3.pgen2.token import LESS
from locale import format_string
from turtle import back
import requests
import json
import sys
from requests.structures import CaseInsensitiveDict
### ONLY PROVEN FOR ONE OBJECT AT A TIME
#data = open('commandoutputid.json')
#data = json.JSONDecoder(data)
#data = [json.loads(line) for line in open('commandoutputid.json', 'r')]
#data2= json.dumps(data)
### WOULD NOT WORK IF I DIDNT PASS DATA AS A STRING
### EVEN THOUGH WHEN I PASSED AS A GOOD JSON OBJECT INSIDE AN ARRAY. COULDNT DESERIALIZE
# Load the JSON object from disk. json.load already deserializes the file
# straight to a dict, so the original dumps()/loads() round-trip
# (dict -> str -> dict) was a no-op and has been dropped.
with open('commandoutputid.json', 'r') as json_file:
    json_dict = json.load(json_file)

url = 'https://...'
headers = CaseInsensitiveDict()
# NOTE(review): "plain/text" looks like a typo for the standard MIME type
# "text/plain" — left unchanged in case the server expects it; confirm.
headers["Accept"] = "plain/text"
headers["Authorization"] = "Bearer Token"

# SECURITY: verify=False disables TLS certificate validation, allowing
# man-in-the-middle attacks. Only acceptable against trusted test hosts.
response = requests.post(url, json=json_dict, headers=headers, verify=False)

# Save the raw response bytes for later inspection.
with open("backup_output.txt", "wb") as f:
    f.write(response.content)
print(response.content)
print(json_dict)
The above script works for passing JSON Object via post in order to return desired output.
My goal is to iterate through hundreds if not thousands of JSON objects in "commandoutputid.json" and issue a POST request for each one — so instead of a single POST request, it would be one POST request per JSON object in the file.
So you're dealing with an NDJSON (Newline Delimited JSON) instead of a JSON.
http://ndjson.org/
Of course python has a library for it → https://pypi.org/project/ndjson/
Below is a simple load/loop to show how to deal with it.
main.py
import ndjson
from pprint import pprint

# Read the whole NDJSON file; ndjson.load yields one dict per input line.
with open('commandoutputid.ndjson', 'r') as fh:
    records = ndjson.load(fh)

# Each record is an independent JSON object — iterate and inspect.
for record in records:
    print(type(record))
    pprint(record, indent=4)
requirements.txt
ndjson
commandoutputid.ndjson
{"deviceId":1077,"aggregationId":"1aba3c13-5891-4c75-8648-4fe6b954e1f5","commandText":"show running-config","logicalSystemName":"<none>","seenAt":"2022-11-15T06:00:01.685243+00:00"}
{"deviceId":1082,"aggregationId":"1aba3c13-5891-4c75-8648-4fe6b954e1f5","commandText":"show running-config","logicalSystemName":"<none>","seenAt":"2022-11-15T06:00:01.705206+00:00"}
{"deviceId":763,"aggregationId":"1aba3c13-5891-4c75-8648-4fe6b954e1f5","commandText":"show running-config","logicalSystemName":"<none>","seenAt":"2022-11-15T06:00:01.698229+00:00"}
output
<class 'dict'>
{ 'aggregationId': '1aba3c13-5891-4c75-8648-4fe6b954e1f5',
'commandText': 'show running-config',
'deviceId': 1077,
'logicalSystemName': '<none>',
'seenAt': '2022-11-15T06:00:01.685243+00:00'}
<class 'dict'>
{ 'aggregationId': '1aba3c13-5891-4c75-8648-4fe6b954e1f5',
'commandText': 'show running-config',
'deviceId': 1082,
'logicalSystemName': '<none>',
'seenAt': '2022-11-15T06:00:01.705206+00:00'}
<class 'dict'>
{ 'aggregationId': '1aba3c13-5891-4c75-8648-4fe6b954e1f5',
'commandText': 'show running-config',
'deviceId': 763,
'logicalSystemName': '<none>',
I am a new learner to Python. I am working with some Python code that calls an API and gets a response in CSV format. I would like to know how I can save that CSV response to a CSV file.
#! /usr/bin/env python
import httplib2
# These aren't needed, just for this example
from pprint import pformat
from datetime import datetime
import pytz
from pvlive_api import PVLive
import pandas as pd
import json
def post_elexon(url):
    """Request *url* with httplib2 and return the (response, content) pair.

    Despite the name, this issues a GET request (method='GET' below).
    """
    http_obj = httplib2.Http()
    resp, content = http_obj.request(
        uri=url,
        method='GET',
        # NOTE(review): a Content-Type header on a GET has no request body to
        # describe; an Accept header may have been intended — confirm against
        # the Elexon API docs before changing.
        headers={'Content-Type': 'application/xml; charset=UTF-8'},)
    return resp, content
def main():
    """Fetch one settlement period from the Elexon BMRS API and dump it."""
    api_url = ('https://api.bmreports.com/BMRS/B1770/v1?APIKey=MY_API_KEY'
               '&SettlementDate=2015-03-01&Period=1&ServiceType=csv')
    resp, content = post_elexon(url=api_url)
    print ("===Response===")
    print (resp)
    print ("===Content===")
    print (pformat(content))
    print ("===Finished===")

if __name__ == "__main__":
    main()
Any help, advice would be greatly appreciated.
Thank you
Try this:
import csv

# BUG FIX: the original passed `resp` to csv.writer(); the writer must wrap
# the *output file handle*. newline='' prevents blank rows on Windows.
with open('out.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    for line in resp.iter_lines():
        # iter_lines yields bytes — decode, then split the row on commas.
        writer.writerow(line.decode('utf-8').split(','))
Edit:
I tested your request - it returns a json.
so you can save it as json:
# NOTE(review): json.dump requires a JSON-serializable object (dict, list,
# str, ...); if `resp` is a response *object* rather than already-parsed
# data this raises TypeError — dump the parsed body instead. Confirm
# resp's type at the call site.
with open('response.json', 'w') as f:
    json.dump(resp, f)
I am trying to download data from the iex api using python and currently I am to the point where i get the data, but now i want to format it.
basically i get a lot of data which i do not care about, I just want to have the "float" section.
The data should look like this:
Ticker, Float,
AAPL, 4700000000, (something like that)
The code I am using:
import requests

# Fetch the AAPL key-stats document and show the decoded JSON payload.
stats_url = "https://api.iextrading.com/1.0/stock/aapl/stats"
payload = requests.get(stats_url).json()
print(payload)
I would be very happy if someone could explain to me how to do this.
Kind regards
Right now I have the code:
import requests
import json

def _append_stats(ticker, filename='resp.json'):
    """Fetch (symbol, float) for *ticker* from IEX and append it to *filename*.

    SYNTAX FIX: the original had `import json filename='resp.json'` — two
    statements jammed onto one line, which is a SyntaxError. The duplicated
    aapl/tsla blocks are also collapsed into this one helper.
    """
    url = "https://api.iextrading.com/1.0/stock/%s/stats" % ticker
    response = requests.get(url).json()
    data = (response['symbol'], response['float'])
    with open(filename, 'a+') as outfile:
        json.dump(data, outfile, indent=4)

for ticker in ('aapl', 'tsla'):
    _append_stats(ticker)
I would like the data to show as:
Ticker, Float,
AAPL, 4700000000,
TSLA, 1700000000,
(The "Ticker" and "Float" headers do not necessarily have to be placed above; I could add those myself in Excel Power Query anyway.)
You can just treat it like a dictionary. response['float'] would give you the float. Similarly for any key.
import requests

# The decoded payload is a plain dict, so individual fields are reached
# by key lookup.
stats = requests.get("https://api.iextrading.com/1.0/stock/aapl/stats").json()
print(stats['float'])
print(stats['symbol'])
Output
4705473314
AAPL
Your code is doing exactly what it should do, if you want a certain part of the json, just access it.
import requests
url = "https://api.iextrading.com/1.0/stock/aapl/stats"
response = requests.get(url).json()
print(response['float'])
>4705473314
print(response['symbol'])
>'AAPL'
print(response['symbol'], response['float'])
to store response in a json file, we can do something like this
import json

# Persist the already-decoded response object as pretty-printed JSON.
out_path = 'resp.json'
with open(out_path, 'w') as outfile:
    json.dump(response, outfile, indent=4)
I am trying to read a json file from python script using the json module. After some googling I found the following code:
# Two problems in the original: (1) the local variable `json` (a filename
# string) shadows the json module, so json.loads resolves to a str
# attribute and raises AttributeError; (2) json.loads takes a string, not
# a file object — json.load is the file-object variant. Re-importing the
# module under an unshadowed alias fixes both without renaming the
# externally-defined `json` filename variable.
import json as json_module

with open(json_folder + json) as json_file:
    json_data = json_module.load(json_file)
print(json_data)
Where json_folder+json are the path and the name of the json file. I am getting the following error:
str object has no attribute loads.
The code is using json as a variable name. It will shadow the module reference you imported. Use different name for the variable.
Besides that, the code is passing a file object, while json.loads accepts a string.
Pass a file content:
json_data = json.loads(json_file.read())
or use json.load which accepts file-like object.
json_data = json.load(json_file)
import json

# IDIOM FIX: use a context manager instead of open()/close() so the handle
# is released even if json.load raises. "rb" is kept — json.load accepts
# binary files and sniffs the encoding itself.
with open("fileToOpen.json", "rb") as f:
    jsonObject = json.load(f)
It seems you are doing it in a rather complicated way.
Try like this :-
# IDIOM FIX: a context manager replaces the manual open()/close() pair, so
# the file is closed even if json.load raises.
with open(json_file) as fh:
    data = json.load(fh)
Considering the path to your json file is set to the variable json_file:
import json
# json.load reads straight from the file object; "rb" lets the json module
# detect the text encoding itself.
with open(json_file, "rb") as f:
    json_data = json.load(f)
print json_data
I Make This....
import urllib2
import json as json_module  # aliased: the variable below reuses the name `json`

link_json = "\\link-were\\"
link_open = urllib2.urlopen(link_json)  # open and return the page
link_read = link_open.read()  # read the page contents
# SECURITY FIX: the original used eval() on the downloaded body, which
# executes arbitrary code from the remote server. json.loads parses the
# same payload safely (and, unlike eval, handles true/false/null).
json = json_module.loads(link_read)[0]  # first object of the top-level array
print(json['helloKey'])
Hello World
I wrote a code to extract JSON objects from the github website using json and requests:
#!/usr/bin/python
import json
import requests

# Fetch the GitHub timeline and write each entry's repository name to a.txt.
r = requests.get('https://github.com/timeline.json') #Replace with your website URL
with open("a.txt", "w") as f:
    # BUG FIX: `r.json` without parentheses is the bound method object —
    # always truthy and not iterable. In modern requests it must be called
    # (it was a property only in pre-1.0 versions of the library).
    for item in r.json() or []:
        try:
            f.write(item['repository']['name'] + "\n")
        except KeyError:
            # Entries without a repository name are skipped.
            pass
This works perfectly fine. However, I want to do the same thing using urllib2 and standard json module. How do I do that? Thanks.
Simply download the data with urlopen and parse it with Python's json module:
import json
import urllib2

# Stream the timeline feed and decode it straight off the socket —
# json.load accepts any file-like object, including the urlopen handle.
feed = urllib2.urlopen('https://github.com/timeline.json')
with open("a.txt", "w") as out:
    for item in json.load(feed) or []:
        try:
            # Record each entry's repository name, one per line.
            out.write(item['repository']['name'] + "\n")
        except KeyError:
            pass