I'm using an API from Anomali to gather an intel list, and I want to ask how I could run the code so that it outputs all the needed column headers into an Excel file.
So I created some code that pulls out the columns I need.
import requests
import json
import pandas as pd
import csv

url = 'https://api.threatstream.com/api/v2/intelligence/?itype=bot_ip'
csv_columns = ['ip', 'source_created', 'status', 'itype', 'expiration_ts', 'is_editable', 'feed_id', 'update_id',
               'value', 'ispublic', 'threat_type', 'workgroups', 'rdns', 'confidence', 'uuid', 'retina_confidence',
               'trusted_circle_ids', 'id', 'source', 'owner_organization_id', 'import_session_id', 'source_modified',
               'type', 'sort', 'description', 'tags', 'threatscore', 'latitude', 'modified_ts', 'org', 'asn',
               'created_ts', 'tlp', 'is_anonymous', 'country', 'source_reported_confidence', 'can_add_public_tags',
               'subtype', 'meta', 'resource_uri']

with open("AnomaliThreat.csv", "a", newline='') as filecsv:
    writer = csv.DictWriter(filecsv, fieldnames=csv_columns)
    writer.writeheader()

headers = {
    'Accept': 'application/json',
    'Authorization': 'apikey testing:wdwfawaf12321rfewawafa'
}
response = requests.get(url=url, headers=headers)
json_Data = json.loads(response.content)
result = json_Data["objects"]

with open("AnomaliThreat.csv", "a", newline='') as filecsv:
    writer = csv.DictWriter(filecsv, fieldnames=csv_columns)
    writer.writerow(result)
If I run this code, all I get is 'list' object has no attribute 'keys'. That happens because csv.DictWriter.writerow expects a single dict, while result is a list of dicts (writer.writerows(result) is what takes a list). My first guess was that the problem is nested data inside the response, for example a list inside a record:
'trusted_circle_ids': [1241412, 212141241]
or a list of dicts:
'tags': [{'id': 'fwafwff', 'name': 'wfwafwawf'},
         {'id': '31231ewfw',
          'name': 'fwafwafwafaw#gmail.com.wafawfawfds.com'}],
And this is what's inside the Anomali response:
[{'source_created': None,
'status': 'inactive',
'itype': 'bot_ip',
'expiration_ts': '',
'ip': '231.24124.1241.412',
'is_editable': False,
'feed_id': 23112231,
'update_id': 231231,
'value': '124124124141224141',
'is_public': False,
'threat_type': 'bot',
'workgroups': [],
'rdns': None,
'confidence': 12,
'uuid': '3123414124124142',
'retina_confidence': 52414,
'trusted_circle_ids': [1241412, 212141241],
'id': 'fwaffewaewafw1231231',
'source': 'wfawfwaefwadfwa',
'owner_organization_id': 2,
'import_session_id': None,
'source_modified': None,
'type': 'ip',
'sort': [312312424124141241, '1241414214241'],
'description': None,
'tags': [{'id': 'fwafwff', 'name': 'wfwafwawf'},
{'id': '31231ewfw',
'name': 'fwafwafwafaw#gmail.com.wafawfawfds.com'}],
'threatscore': 412,
'latitude': 'wafefwaf',
'modified_ts': 'wawafwadfd',
'org': 'fawfwafawe',
'asn': 'fwafwa2131231',
'created_ts': '41241241241241',
'tlp': None,
'is_anonymous': False,
'country': 'fwafw',
'source_reported_confidence': 21,
'can_add_public_tags': False,
'longitude': -321412,
'subtype': None,
'meta': {'detail2': 'bi2141412412342424',
'severity': '3123124r3'},
'resource_uri': '/api/v2/intelligence/241fsdfsf241325/'},
{'source_created': None,
'status': 'inactive',
'itype': 'bot_ip',
'expiration_ts': '',
'ip': '231.24124.1241.412',
'is_editable': False,
'feed_id': 23112231,
'update_id': 231231,
'value': '124124124141224141',
'is_public': False,
'threat_type': 'bot',
'workgroups': [],
'rdns': None,
'confidence': 12,
'uuid': '3123414124124142',
'retina_confidence': 52414,
'trusted_circle_ids': [1241412, 212141241],
'id': 'fwaffewaewafw1231231',
'source': 'wfawfwaefwadfwa',
'owner_organization_id': 2,
'import_session_id': None,
'source_modified': None,
'type': 'ip',
'sort': [312312424124141241, '1241414214241'],
'description': None,
'tags': [{'id': 'fwafwff', 'name': 'wfwafwawf'},
{'id': '31231ewfw',
'name': 'fwafwafwafaw#gmail.com.wafawfawfds.com'}],
'threatscore': 412,
'latitude': 'wafefwaf',
'modified_ts': 'wawafwadfd',
'org': 'fawfwafawe',
'asn': 'fwafwa2131231',
'created_ts': '41241241241241',
'tlp': None,
'is_anonymous': False,
'country': 'fwafw',
'source_reported_confidence': 21,
'can_add_public_tags': False,
'longitude': -321412,
'subtype': None,
'meta': {'detail2': 'bi2141412412342424',
'severity': '3123124r3'},
'resource_uri': '/api/v2/intelligence/241fsdfsf241325/'}]
I'm open to any suggestions on how to make it so that the results can be written into an Excel file.
Problem solved!
I needed to write the rows out one by one instead, so I added these lines (with data_file opened first, which my original snippet left out):
data_file = open("AnomaliThreat.csv", "w", newline='')  # open the output file first
csv_writer = csv.writer(data_file)
count = 0
for res in result:
    if count == 0:
        # write the header once, using the keys of the first record
        header = res.keys()
        csv_writer.writerow(header)
        count += 1
    csv_writer.writerow(res.values())
data_file.close()
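For reference, a shorter sketch that should do the same thing, assuming result is the list of dicts from above: csv.DictWriter can write the whole list at once, and extrasaction='ignore' skips response keys that aren't listed in csv_columns.
with open("AnomaliThreat.csv", "w", newline='') as data_file:
    # extrasaction='ignore' drops keys that are not in csv_columns
    writer = csv.DictWriter(data_file, fieldnames=csv_columns, extrasaction='ignore')
    writer.writeheader()
    writer.writerows(result)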
You can try doing something like this, if I understood correctly:
import requests
import json
import pandas as pd

url = 'https://api.threatstream.com/api/v2/intelligence/?itype=bot_ip'
csv_columns = ['ip', 'source_created', 'status', 'itype', 'expiration_ts', 'is_editable', 'feed_id', 'update_id',
               'value', 'ispublic', 'threat_type', 'workgroups', 'rdns', 'confidence', 'uuid', 'retina_confidence',
               'trusted_circle_ids', 'id', 'source', 'owner_organization_id', 'import_session_id', 'source_modified',
               'type', 'sort', 'description', 'tags', 'threatscore', 'latitude', 'modified_ts', 'org', 'asn',
               'created_ts', 'tlp', 'is_anonymous', 'country', 'source_reported_confidence', 'can_add_public_tags',
               'subtype', 'meta', 'resource_uri']
headers = {
    'Accept': 'application/json',
    'Authorization': 'apikey testing:wdwfawaf12321rfewawafa'
}
response = requests.get(url=url, headers=headers)
json_Data = json.loads(response.content)
result = json_Data["objects"]

rows = []
for entry in result:
    # keep only the key/value pairs whose key is in csv_columns
    rows.append({key: value for key, value in entry.items() if key in csv_columns})
dataframe_1 = pd.DataFrame(rows, columns=csv_columns)
dataframe_1.to_csv("AnomaliThreat.csv", index=False)
Something along those lines: basically, iterate through the objects in the result, keep each key/value pair whose key is in csv_columns, and once that is done just use DataFrame.to_csv.
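And since the original goal was an Excel file: the same DataFrame can be written to .xlsx with to_excel, assuming an Excel writer engine such as openpyxl is installed.
# write the same DataFrame to an Excel workbook instead of CSV
# requires an engine such as openpyxl: pip install openpyxl
dataframe_1.to_excel("AnomaliThreat.xlsx", index=False)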
Related
I'm making a call to an API which returns a JSON response, from which I am then trying to retrieve certain data.
{'data': {'9674': {'category': 'token',
'contract_address': [{'contract_address': '0x2a3bff78b79a009976eea096a51a948a3dc00e34',
'platform': {'coin': {'id': '1027',
'name': 'Ethereum',
'slug': 'ethereum',
'symbol': 'ETH'},
'name': 'Ethereum'}}],
'date_added': '2021-05-10T00:00:00.000Z',
'date_launched': '2021-05-10T00:00:00.000Z',
'description': 'Wilder World (WILD) is a cryptocurrency '
'launched in 2021and operates on the '
'Ethereum platform. Wilder World has a '
'current supply of 500,000,000 with '
'83,683,300.17 in circulation. The last '
'known price of Wilder World is 2.28165159 '
'USD and is down -6.79 over the last 24 '
'hours. It is currently trading on 21 active '
'market(s) with $2,851,332.76 traded over '
'the last 24 hours. More information can be '
'found at https://www.wilderworld.com/.',
'id': 9674,
'is_hidden': 0,
'logo': 'https://s2.coinmarketcap.com/static/img/coins/64x64/9674.png',
'name': 'Wilder World',
'notice': '',
'platform': {'id': 1027,
'name': 'Ethereum',
'slug': 'ethereum',
'symbol': 'ETH',
'token_address': '0x2a3bff78b79a009976eea096a51a948a3dc00e34'},
'self_reported_circulating_supply': 19000000,
'self_reported_tags': None,
'slug': 'wilder-world',
'subreddit': '',
'symbol': 'WILD',
'tag-groups': ['INDUSTRY',
'CATEGORY',
'INDUSTRY',
'CATEGORY',
'CATEGORY',
'CATEGORY',
'CATEGORY'],
'tag-names': ['VR/AR',
'Collectibles & NFTs',
'Gaming',
'Metaverse',
'Polkastarter',
'Animoca Brands Portfolio',
'SkyVision Capital Portfolio'],
'tags': ['vr-ar',
'collectibles-nfts',
'gaming',
'metaverse',
'polkastarter',
'animoca-brands-portfolio',
'skyvision-capital-portfolio'],
'twitter_username': 'WilderWorld',
'urls': {'announcement': [],
'chat': [],
'explorer': ['https://etherscan.io/token/0x2a3bff78b79a009976eea096a51a948a3dc00e34'],
'facebook': [],
'message_board': ['https://medium.com/@WilderWorld'],
'reddit': [],
'source_code': [],
'technical_doc': [],
'twitter': ['https://twitter.com/WilderWorld'],
'website': ['https://www.wilderworld.com/']}}},
'status': {'credit_count': 1,
'elapsed': 7,
'error_code': 0,
'error_message': None,
'notice': None,
'timestamp': '2022-01-20T21:33:04.832Z'}}
The data I am trying to get is 'logo': 'https://s2.coinmarketcap.com/static/img/coins/64x64/9674.png', but this sits within [data][9674][logo].
As this script runs in the background for other objects, I won't know what the number [9674] is for other requests.
So is there a way to get that number automatically?
[data] will always be consistent.
I'm using this to get the data back:
import json
import pprint

from requests import Session

# headers, url and parameters are defined earlier in the script
session = Session()
session.headers.update(headers)
response = session.get(url, params=parameters)
pprint.pprint(json.loads(response.text)['data']['9674']['logo'])
You can try this:
session = Session()
session.headers.update(headers)
response = session.get(url, params=parameters)
resp = json.loads(response.text)
pprint.pprint(resp['data'][next(iter(resp['data']))]['logo'])
where next(iter(resp['data'])) returns the first key in the resp['data'] dict; in your example that is '9674'.
With .keys() you get a view of all keys in a dictionary.
So you can use keys = list(json.loads(response.text)['data'].keys()) to get the keys in the data dict.
If you know there is always only one entry in 'data', you can use json.loads(response.text)['data'][keys[0]]['logo']. Otherwise you would need to iterate over all keys in the list and check which one you need.
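If 'data' can hold several entries, a minimal sketch of that iteration (reusing response from above) would be:
resp = json.loads(response.text)
# walk every id in 'data' and pull each logo URL
for coin_id, coin in resp['data'].items():
    print(coin_id, coin['logo'])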
This is my first question on this spectacular website. I need to know how to export complex information from a JSON to a CSV.
The problem is that I need two different values out of the list that I have in the column.
I tried a lot of different combinations and couldn't get it, so one of my last resorts is asking the community.
My code is this:
def output(alerts):
    output = list()
    for alert in alerts:
        applications = alerts['applications']
        for app in applications:
            categories = app['categories']
            for cat in categories:
                output_alert = [list(cat.items())[0], app['confidence'], app['icon'],
                                app['name'], app['version'], app['website'], alerts['language'], alerts['status']]
                output.append(output_alert)
    df = pd.DataFrame(output, columns=['Categories', 'Confidence', 'Icon', 'Name', 'Version', 'Website',
                                       'Language', 'Status'])
    df.to_csv(args.output)
    print('Scan completed, you already have your new CSV file')
    return
I left you a picture of the CSV file with the problem in column B (I have a list there), but I actually need two columns, one for each value...
I attached the JSON response that I get from the REST API:
{'applications': [{'categories': [{'59': 'JavaScript libraries'}],
'confidence': '100',
'icon': 'Lo-dash.png',
'name': 'Lodash',
'version': '4.17.15',
'website': 'http://www.lodash.com'},
{'categories': [{'12': 'JavaScript frameworks'}],
'confidence': '100',
'icon': 'RequireJS.png',
'name': 'RequireJS',
'version': '2.3.6',
'website': 'http://requirejs.org'},
{'categories': [{'13': 'Issue trackers'}],
'confidence': '100',
'icon': 'Sentry.svg',
'name': 'Sentry',
'version': '4.6.2',
'website': 'https://sentry.io/'},
{'categories': [{'1': 'CMS'},
{'6': 'Ecommerce'},
{'11': 'Blogs'}],
'confidence': '100',
'icon': 'Wix.png',
'name': 'Wix',
'version': None,
'website': 'https://www.wix.com'},
{'categories': [{'59': 'JavaScript libraries'}],
'confidence': '100',
'icon': 'Zepto.png',
'name': 'Zepto',
'version': None,
'website': 'http://zeptojs.com'},
{'categories': [{'19': 'Miscellaneous'}],
'confidence': '100',
'icon': 'webpack.svg',
'name': 'webpack',
'version': None,
'website': 'https://webpack.js.org/'},
{'categories': [{'12': 'JavaScript frameworks'}],
'confidence': '0',
'icon': 'React.png',
'name': 'React',
'version': None,
'website': 'https://reactjs.org'}], 'language': 'es', 'status': 'success'}
That last [{'59': 'JavaScript libraries'}] structure is my big problem! Thank you for your time and help!
You could try using list(cat.keys())[0], list(cat.values())[0] in your output_alert variable to extract key and value separately.
You can use json_normalize to extract your columns without the for-loop, and then create two new columns with the extracted keys and values from categories:
result = pd.json_normalize(
    alerts,
    record_path=["applications"],
    meta=["language", "status"]
).explode("categories")
result["category_labels"] = result.categories.apply(lambda x: list(x.keys())[0])
result["category_values"] = result.categories.apply(lambda x: list(x.values())[0])
The output is one row per application/category pair, with the extracted keys in category_labels and the values in category_values.
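As an optional follow-up (my addition, not part of the answer above), the raw categories column can be dropped once its contents are extracted, before writing the CSV (the output filename is just a placeholder):
# drop the now-redundant dict column, then export
result = result.drop(columns=["categories"])
result.to_csv("output.csv", index=False)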
This is my first time dealing with JSON data, so I'm not that familiar with its structure.
I got some data from the "We the People" e-petition site with the following code:
url = "https://api.whitehouse.gov/v1/petitions.json?limit=3&offset=0&createdBefore=1573862400"
jdata_2 = requests.get(url).json()
Yet I realized this is something different from an ordinary JSON structure, since I got an error when I tried to convert it into an Excel file with pandas:
df = pandas.read_json(jdata_2)
Obviously, I must be missing something I should have done before calling pandas.read_json().
I have searched for an answer, but most questions are "How can I convert JSON data into Excel data", which assumes you already have well-formed JSON data. In my case I scraped it from the URL, so I thought I could turn the string into JSON data and then convert that into Excel data. I tried json.dump() as well, but it didn't work.
I know this must be a naive question, but I'm not sure where to start. If anyone can instruct me on how to deal with it, or link some references I can study, I would really appreciate it.
Thank you for your help in advance.
This is the JSON data from the request, pretty-printed with indent=4.
Input:
url = "https://api.whitehouse.gov/v1/petitions.json?limit=3&offset=0&createdBefore=1573862400"
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(jdata_2)
Output :
{ 'metadata': { 'requestInfo': { 'apiVersion': 1,
'query': { 'body': None,
'createdAfter': None,
'createdAt': None,
'createdBefore': '1573862400',
'isPublic': 1,
'isSignable': None,
'limit': '3',
'mock': 0,
'offset': '0',
'petitionsDefaultLimit': '1000',
'publicThreshold': 149,
'responseId': None,
'signatureCount': None,
'signatureCountCeiling': None,
'signatureCountFloor': 0,
'signatureThreshold': None,
'signatureThresholdCeiling': None,
'signatureThresholdFloor': None,
'sortBy': 'DATE_REACHED_PUBLIC',
'sortOrder': 'ASC',
'status': None,
'title': None,
'url': None,
'websiteUrl': 'https://petitions.whitehouse.gov'},
'resource': 'petitions'},
'responseInfo': { 'developerMessage': 'OK',
'errorCode': '',
'moreInfo': '',
'status': 200,
'userMessage': ''},
'resultset': {'count': 1852, 'limit': 3, 'offset': 0}},
'results': [ { 'body': 'Please save kurdish people in syria \r\n'
'pleaee save north syria',
'created': 1570630389,
'deadline': 1573225989,
'id': '2798897',
'isPublic': True,
'isSignable': False,
'issues': [ { 'id': 326,
'name': 'Homeland Security & '
'Defense'}],
'petition_type': [ { 'id': 291,
'name': 'Call on Congress to '
'act on an issue'}],
'reachedPublic': 0,
'response': [],
'signatureCount': 149,
'signatureThreshold': 100000,
'signaturesNeeded': 99851,
'status': 'closed',
'title': 'Please save rojava north syria\r\n'
'please save kurdish people\r\n'
'please stop erdogan\r\n'
'plaease please',
'type': 'petition',
'url': 'https://petitions.whitehouse.gov/petition/please-save-rojava-north-syria-please-save-kurdish-people-please-stop-erdogan-plaease-please'},
{ 'body': 'Kane Friess was a 2 year old boy who was '
"murdered by his mom's boyfriend, Gyasi "
'Campbell. Even with expert statements from '
'forensic anthropologists, stating his injuries '
'wete the result of homicide. Mr. Campbell was '
'found guilty of involuntary manslaughter. This '
"is an outrage to Kane's Family and our "
'community.',
'created': 1566053365,
'deadline': 1568645365,
'id': '2782248',
'isPublic': True,
'isSignable': False,
'issues': [ { 'id': 321,
'name': 'Criminal Justice Reform'}],
'petition_type': [ { 'id': 281,
'name': 'Change an existing '
'Administration '
'policy'}],
'reachedPublic': 0,
'response': [],
'signatureCount': 149,
'signatureThreshold': 100000,
'signaturesNeeded': 99851,
'status': 'closed',
'title': "Kane's Law. Upon which the murder of a child, "
'regardless of circumstances, be seen as 1st '
'degree murder. A Federal Law.',
'type': 'petition',
'url': 'https://petitions.whitehouse.gov/petition/kanes-law-upon-which-murder-child-regardless-circumstances-be-seen-1st-degree-murder-federal-law'},
{ 'body': "Schumer and Pelosi's hatred and refusing to "
'work with President Donald J. Trump is holding '
'America hostage. We the people know securing '
'our southern border is a priority which will '
'not happen with these two in office. Lets '
'build the wall NOW!',
'created': 1547050064,
'deadline': 1549642064,
'id': '2722358',
'isPublic': True,
'isSignable': False,
'issues': [ {'id': 306, 'name': 'Budget & Taxes'},
{ 'id': 326,
'name': 'Homeland Security & '
'Defense'},
{'id': 29, 'name': 'Immigration'}],
'petition_type': [ { 'id': 291,
'name': 'Call on Congress to '
'act on an issue'}],
'reachedPublic': 0,
'response': [],
'signatureCount': 149,
'signatureThreshold': 100000,
'signaturesNeeded': 99851,
'status': 'closed',
'title': 'Remove Chuck Schumer and Nancy Pelosi from '
'office',
'type': 'petition',
'url': 'https://petitions.whitehouse.gov/petition/remove-chuck-schumer-and-nancy-pelosi-office'}]}
And this is the error message I got:
Input :
df = pandas.read_json(jdata_2)
Output :
ValueError: Invalid file path or buffer object type: <class 'dict'>
You can try the code below as well; it works fine. pandas.read_json expects a JSON string or a file path, not an already-parsed dict (hence the ValueError above), so build the DataFrame from the parsed dict directly:
URL = "https://api.whitehouse.gov/v1/petitions.json?limit=3&offset=0&createdBefore=1573862400"
// fetching the json response from the URL
req = requests.get(URL)
text_data= req.text
json_dict= json.loads(text_data)
//converting json dictionary to python dataframe for results object
df = pd.DataFrame.from_dict(json_dict["results"])
Finally, saving the dataframe to excel format i.e xlsx
df.to_excel("output.xlsx")
I want to convert this nested JSON into a DataFrame.
I tried different functions, but none worked correctly.
The encoding that worked for me was:
encoding = "utf-8-sig"
[{'replayableActionOperationState': 'SKIPPED',
'replayableActionOperationGuid': 'RAO_1037351',
'failedMessage': 'Cannot replay action: RAO_1037351: com.ebay.sd.catedor.core.model.DTOEntityPropertyChange; local class incompatible: stream classdesc serialVersionUID = 7777212484705611612, local class serialVersionUID = -1785129380151507142',
'userMessage': 'Skip all mode',
'username': 'gfannon',
'sourceAuditData': [{'guid': '24696601-b73e-43e4-bce9-28bc741ac117',
'operationName': 'UPDATE_CATEGORY_ATTRIBUTE_PROPERTY',
'creationTimestamp': 1563439725240,
'auditCanvasInfo': {'id': '165059', 'name': '165059'},
'auditUserInfo': {'id': 1, 'name': 'gfannon'},
'externalId': None,
'comment': None,
'transactionId': '0f135909-66a7-46b1-98f6-baf1608ffd6a',
'data': {'entity': {'guid': 'CA_2511202',
'tagType': 'BOTH',
'description': None,
'name': 'Number of Shelves'},
'propertyChanges': [{'propertyName': 'EntityProperty',
'oldEntity': {'guid': 'CAP_35',
'name': 'DisableAsVariant',
'group': None,
'action': 'SET',
'value': 'true',
'tagType': 'SELLER'},
'newEntity': {'guid': 'CAP_35',
'name': 'DisableAsVariant',
'group': None,
'action': 'SET',
'value': 'false',
'tagType': 'SELLER'}}],
'entityChanges': None,
'primary': True}}],
'targetAuditData': None,
'conflictedGuids': None,
'fatal': False}]
This is what I tried so far; there were more attempts, but this is what got me closest:
with open(r"Desktop\Ann's json parsing\report.tsv", encoding='utf-8-sig') as data_file:
    data = json.load(data_file)

df = json_normalize(data)
print(df)
pd.DataFrame(df)
The nested lists are shown as one whole column; I'm trying to parse those columns, 'failedMessage' and 'sourceAuditData'. I also tried json.loads/json(df), but the output isn't correct.
pd.DataFrame.from_dict(a['sourceAuditData'][0]['data']['propertyChanges'][0])
This line will retrieve one of the outputs I need, but I don't know how to apply it to the whole file.
The expected result should be a csv/xlsx file with a column and value for each row.
For your particular example:
def unroll_dict(d):
    data = []
    for k, v in d.items():
        if isinstance(v, list):
            data.append((k, ''))
            data.extend(unroll_dict(v[0]))
        elif isinstance(v, dict):
            data.append((k, ''))
            data.extend(unroll_dict(v))
        else:
            data.append((k, v))
    return data
And given the data in your question is stored in the variable example:
df = pd.DataFrame(unroll_dict(example[0])).set_index(0).transpose()
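From there, exporting the flattened single-row frame is just (my addition; to_excel would give xlsx instead of csv, and the filename is a placeholder):
# persist the flattened row to CSV
df.to_csv("report_flat.csv", index=False)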
I am trying to pull 'created' from the Monzo data I'm retrieving.
I have made a call to the Monzo API with the following code:
from monzo.monzo import Monzo
client = Monzo(INSERT API KEY)
data = client.get_transactions("INSERT ACCOUNT NUMBER")
print (data)
and I can't quite get the data I need, which looks like this:
d': 'merch_000094MPASVBf7xCdrZOz3', 'created': '2016-01-20T21: 26: 33.985Z', 'name': 'DelicedeFrance', 'logo': 'https: //mondo-logo-cache.appspot.com/twitter/deliceuk/?size=large', 'emoji': '🇫🇷', 'category': 'eating_out', 'online': False, 'atm': False, 'address': {'short_formatted': 'LiverpoolStreetStation,
LondonEC2M7PY', 'formatted': 'LiverpoolStreetStation,
LondonEC2M7PY,
UnitedKingdom', 'address': 'LiverpoolStreetStation', 'city': 'London', 'region': 'GreaterLondon', 'country': 'GBR', 'postcode': 'EC2M7PY', 'latitude': 51.518159172221615, 'longitude': -0.08210659649555102, 'zoom_level': 17, 'approximate': False}, 'updated': '2016-02-02T14: 10: 48.664Z', 'metadata': {'foursquare_category': 'Restaurant', 'foursquare_category_icon': 'https: //ss3.4sqi.net/img/categories_v2/food/default_88.png','foursquare_website': '', 'google_places_icon': 'https: //maps.gstatic.com/mapfiles/place_api/icons/restaurant-71.png', 'google_places_name': 'DelicedeFrance', 'suggested_name': 'DelicedeFrance', 'suggested_tags': '#food', 'twitter_id': ''}, 'disable_feedback': False}, 'notes': '', 'metadata': {}, 'account_balance': 3112, 'attachments': [], 'category': 'eating_out', 'is_load': False, 'settled': '2017-04-28T04: 54: 18.167Z', 'local_amount': -199, 'local_currency': 'GBP', 'updated': '2017-04-28T06: 15: 06.095Z', 'counterparty': {}, 'originator': False, 'include_in_spending': True}, {'created': '2017-04-28T08: 54: 10.917Z','amount': -130, 'currency': 'GBP', 'merchant': {'created': '2016-04-21T08: 02: 13.537Z','logo': 'https: //mondo-logo-cache.appspot.com/twitter/MCSaatchiLondon/?size=large', 'emoji': '🍲', 'category': 'eating_out', 'online': False, 'atm': False...
How do I pull the 'created' date?
Try this:
#!/usr/bin/env python
import csv

from pymonzo import MonzoAPI

if __name__ == '__main__':
    monzo_api = MonzoAPI()
    monzo_transactions = monzo_api.transactions()

    with open('monzo_transactions.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        for transaction in monzo_transactions:
            writer.writerow([
                transaction.amount, transaction.description,
                transaction.created,
            ])

    print('All done!')
If this is actually valid JSON and you just have paste errors, then you can use the Python library json:
import json
data = json.loads(datastring)
If this is not JSON, you probably have to write a parser of your own.
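Since the paste uses single quotes and Python's False rather than JSON's false, it may actually be a Python repr rather than JSON; in that case a hedged alternative is ast.literal_eval, assuming datastring holds one complete structure:
import ast

# literal_eval safely parses Python-literal syntax: single quotes, True/False, None
data = ast.literal_eval(datastring)
print(data['created'])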