I am trying to achieve a particular nested JSON output (described below).
My current code:
#!/usr/bin/env python3.9
import json

complete_lst = []
url_lst = ['https://', 'https://', 'https://']

complete_lst.append({'title': 'Hello'})
for url in url_lst:
    complete_lst.append({'watch': {'Season1': {'url': url}}})

with open("Hello.json", "w") as file:
    json.dump(complete_lst, file)
The output JSON file puts each URL in its own dictionary, but I want all the URLs to be nested under the watch->Season1->url key.
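That is, presumably something like this (a sketch of the described structure, using the placeholder URLs from my code):

[
    {
        'title': 'Hello',
        'watch': {'Season1': {'url': ['https://', 'https://', 'https://']}}
    }
]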
Try this:
import json

complete_lst = []
url_lst = ['https://', 'https://', 'https://']

complete_lst.append({
    'title': 'Hello',
    'watch': {'Season1': {"url": []}}
})

for url in url_lst:
    complete_lst[0]["watch"]["Season1"]["url"].append(url)

print(complete_lst)
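If you then want to write the result to Hello.json as in the question, a minimal follow-up sketch (continuing from the snippet above, which already imports json):

with open("Hello.json", "w") as file:
    json.dump(complete_lst, file)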
If your data is static, you can simply write:
import json

complete_lst = [{
    'title': 'Hello',
    'watch': {'Season1': {"url": ['https://', 'https://', 'https://']}}
}]

print(complete_lst)
Another way of doing this would be to build a dictionary instead of a list:
#!/usr/bin/env python3.9
import json

url_lst = ['https://', 'https://', 'https://']

complete_list = {}
complete_list['title'] = "Hello"
complete_list['watch'] = {}
complete_list['watch']['Season1'] = {}
complete_list['watch']['Season1']['urls'] = []

for url in url_lst:
    complete_list['watch']['Season1']['urls'].append(url)

with open("Hello.json", "w") as file:
    json.dump(complete_list, file)
Note: here you don't need to access items by their indices; you can use the keys directly.
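For comparison, the same structure can also be built as a single literal; a short sketch:

import json

url_lst = ['https://', 'https://', 'https://']

# the same nested structure, built in one expression
complete_list = {
    'title': 'Hello',
    'watch': {'Season1': {'urls': list(url_lst)}},
}

with open("Hello.json", "w") as file:
    json.dump(complete_list, file)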
I'm trying to convert XML to JSON in Python using the xmltodict library. The XML does get converted to JSON, but every key in the resulting dict is prefixed with '#'. Below is the code snippet and sample output:
import xmltodict
import json

with open('response.xml', 'r') as res_file:
    doc = xmltodict.parse(res_file.read())

xml_json_str = json.dumps(doc)
final_json = json.loads(xml_json_str)
Output:
"CustomerInfo": {
"#address": "Bangalore, Karnataka 560034",
"#email": "abc#gmail.com",
"#name": "Sam",
}
How can I remove the # from all keys in one go?
Finally I found a solution which works like a charm: while parsing the XML, set attr_prefix='' to remove all the # from the keys.
The following change worked for me:
with open('response.xml', 'r') as res_file:
    doc = xmltodict.parse(res_file.read(), attr_prefix='')
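For reference, a minimal end-to-end sketch of the same idea, writing the cleaned result back out to a hypothetical response.json:

import json
import xmltodict

# attr_prefix='' drops the prefix xmltodict adds to keys built from XML attributes
with open('response.xml', 'r') as res_file:
    doc = xmltodict.parse(res_file.read(), attr_prefix='')

with open('response.json', 'w') as out_file:
    json.dump(doc, out_file)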
Check this out. It will remove the # from all keys, whichever node they are in; I have added one extra node ('Location') just to show the example:
def removeAtTheRate(jsonFile, final_json_edited):
    if jsonFile != {} and type(jsonFile) == dict:
        for i in jsonFile.keys():
            final_json_values = {}
            for j in jsonFile[i]:
                if j[:1] == '#':
                    final_json_values[j[1:]] = jsonFile[i][j]
            if i[:1] == '#':
                final_json_edited[i[1:]] = final_json_values
            else:
                final_json_edited[i] = final_json_values
        print(final_json_edited)

doc = {"#CustomerInfo": {"#address": "Bangalore, Karnataka 560034", "#email": "abc#gmail.com", "#name": "Sam"},
       "Location": {"#Loc": "Mum"}}
removeAtTheRate(doc, {})
Result:
{'Location': {'Loc': 'Mum'}, 'CustomerInfo': {'name': 'Sam', 'address': 'Bangalore, Karnataka 560034', 'email': 'abc#gmail.com'}}
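If the document can nest deeper than two levels, a recursive alternative (a sketch, not the function above) that strips a leading '#' from keys at any depth:

def strip_prefix(obj, prefix='#'):
    # recursively rebuild dicts/lists, dropping a leading prefix from dict keys
    if isinstance(obj, dict):
        return {(k[len(prefix):] if k.startswith(prefix) else k): strip_prefix(v, prefix)
                for k, v in obj.items()}
    if isinstance(obj, list):
        return [strip_prefix(v, prefix) for v in obj]
    return obj

doc = {"#CustomerInfo": {"#address": "Bangalore, Karnataka 560034",
                         "#email": "abc#gmail.com", "#name": "Sam"},
       "Location": {"#Loc": "Mum"}}
print(strip_prefix(doc))
# {'CustomerInfo': {'address': 'Bangalore, Karnataka 560034', 'email': 'abc#gmail.com', 'name': 'Sam'}, 'Location': {'Loc': 'Mum'}}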
I've been having some trouble sending files via Python's requests module. I can send emails without attachments just fine, but as soon as I add a files parameter, the call fails and I get a 415 error.
I've looked through the site and thought it might be because I wasn't sending the content type of the files when building that data, so I altered the code to look up the content type with mimetypes; still 415.
Following this thread (python requests file upload) I made a couple more edits, but still 415.
The error message says:
"A supported MIME type could not be found that matches the content type of the response. None of the supported type(s)"
then lists a bunch of JSON types, e.g. "application/json;odata.metadata=minimal;odata.streaming=true;IEEE754Compatible=false", and then says:
"matches the content type 'multipart/form-data; boundary=0e5485079df745cf0d07777a88aeb8fd'"
Which of course makes me think I'm still not handling the content type correctly somewhere.
Can anyone see where I'm going wrong in my code?
Thanks!
Here's the function:
def send_email(access_token):
    import requests
    import json
    import pandas as pd
    import mimetypes

    url = "https://outlook.office.com/api/v2.0/me/sendmail"
    headers = {
        'Authorization': 'Bearer ' + access_token,
    }

    data = {}
    data['Message'] = {
        'Subject': "Test",
        'Body': {
            'ContentType': 'Text',
            'Content': 'This is a test'
        },
        'ToRecipients': [
            {
                'EmailAddress': {
                    'Address': 'MY TEST EMAIL ADDRESS'
                }
            }
        ]
    }
    data['SaveToSentItems'] = "true"

    json_data = json.dumps(data)
    # need to convert the above json_data back to a dict, otherwise it won't work
    json_data = json.loads(json_data)

    ### ATTACHMENT WORK
    file_list = ['test_files/test.xlsx', 'test_files/test.docx']
    files = {}
    pos = 1
    for file in file_list:
        x = file.split('/')  # separate the file name from the file path
        files['file' + str(pos)] = (  # give the file a unique name
            x[1],  # actual filename
            open(file, 'rb'),  # open the file
            mimetypes.MimeTypes().guess_type(file)[0]  # add in the content type
        )
        pos += 1  # increase the naming iteration
    # print(files)

    r = requests.post(url, headers=headers, json=json_data, files=files)
    print("")
    print(r)
    print("")
    print(r.text)
I've figured it out! I took a look at the Outlook API documentation and realised I should be adding attachments as base64-encoded entries within the message JSON, not as a files parameter to requests.post. Here's my working example:
import requests
import json
import pandas as pd
import mimetypes
import base64

url = "https://outlook.office.com/api/v2.0/me/sendmail"
headers = {
    'Authorization': 'Bearer ' + access_token,
}

Attachments = []
file_list = ['test_files/image.png', 'test_files/test.xlsx']
for file in file_list:
    x = file.split('/')  # split the file path so we can get its name
    filename = x[1]  # get the filename

    # encode the file into bytes, then turn those bytes into a string
    encoded_string = ''
    with open(file, "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read())
    encoded_string = encoded_string.decode("utf-8")

    # append the file to the attachments list
    Attachments.append({
        "@odata.type": "#Microsoft.OutlookServices.FileAttachment",
        "Name": filename,
        "ContentBytes": encoded_string
    })

data = {}
data['Message'] = {
    'Subject': "Test",
    'Body': {
        'ContentType': 'Text',
        'Content': 'This is a test'
    },
    'ToRecipients': [
        {
            'EmailAddress': {
                'Address': 'EMAIL_ADDRESS'
            }
        }
    ],
    "Attachments": Attachments
}
data['SaveToSentItems'] = "true"

json_data = json.dumps(data)
json_data = json.loads(json_data)

r = requests.post(url, headers=headers, json=json_data)
print(r)
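If you do want to send the MIME type as well (the reason mimetypes was imported in the original attempt), the attachment entry can also carry a ContentType field. A hedged sketch, assuming the FileAttachment resource accepts ContentType and using a hypothetical make_attachment helper:

import base64
import mimetypes

def make_attachment(path):
    # hypothetical helper: build one attachment entry from a local path;
    # ContentType is assumed to be accepted by the FileAttachment resource
    with open(path, 'rb') as fh:
        encoded = base64.b64encode(fh.read()).decode('utf-8')
    return {
        "@odata.type": "#Microsoft.OutlookServices.FileAttachment",
        "Name": path.split('/')[-1],
        "ContentType": mimetypes.guess_type(path)[0],
        "ContentBytes": encoded,
    }

Attachments = [make_attachment(p) for p in ['test_files/image.png', 'test_files/test.xlsx']]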
The following code is giving me the error:
Traceback (most recent call last):
  File "AMZGetPendingOrders.py", line 66, in <module>
    item_list.append(item['SellerSKU'])
TypeError: string indices must be integers
The code:
from mws import mws
import time
import json
import xmltodict

access_key = 'xx'  # replace with your access key
seller_id = 'yy'  # replace with your seller id
secret_key = 'zz'  # replace with your secret key
marketplace_usa = '00'

orders_api = mws.Orders(access_key, secret_key, seller_id)
orders = orders_api.list_orders(marketplaceids=[marketplace_usa], orderstatus=('Pending'), fulfillment_channels=('MFN'), created_after='2018-07-01')

# save as XML file
filename = 'c:order.xml'
with open(filename, 'w') as f:
    f.write(orders.original)

# convert XML to JSON
dictString = json.dumps(xmltodict.parse(orders.original))

# write new JSON to file
with open("output.json", 'w') as f:
    f.write(dictString)

# read JSON and parse out the order numbers
with open('output.json', 'r') as jsonfile:
    data = json.load(jsonfile)

# initialize blank list
id_list = []
for order in data['ListOrdersResponse']['ListOrdersResult']['Orders']['Order']:
    id_list.append(order['AmazonOrderId'])

# this "gets" the order item info - similar to the initial Amazon "get", though it has fewer switches
orders_api = mws.Orders(access_key, secret_key, seller_id)

# opens and empties the orderitem.xml file
open('c:orderitem.xml', 'w').close()

# iterates through the list of AmazonOrderIds and writes the item information to orderitem.xml
for x in id_list:
    orders = orders_api.list_order_items(amazon_order_id=x)
    filename = 'c:orderitem.xml'
    with open(filename, 'a') as f:
        f.write(orders.original)

    # convert XML to JSON
    amz_items_pending = json.dumps(xmltodict.parse(orders.original))

    # write new JSON to file
    with open("pending.json", 'w') as f:
        f.write(amz_items_pending)

    # read JSON and parse item_no and qty
    with open('pending.json', 'r') as jsonfile1:
        data1 = json.load(jsonfile1)

    # initialize blank list
    item_list = []
    for item in data1['ListOrderItemsResponse']['ListOrderItemsResult']['OrderItems']['OrderItem']:
        item_list.append(item['SellerSKU'])
        # print(item)

    # print(id_list)
    # print(data1)
    # print(item_list)
    time.sleep(10)
I don't understand why Python thinks this is a string and not a dictionary. When I print the data it looks like a dictionary (curly braces, single quotes, colons, etc.).
print(data1) shows my dictionary:
{
    'ListOrderItemsResponse': {
        '#xmlns': 'https://mws.amazonservices.com/Orders/2013-09-01',
        'ListOrderItemsResult': {
            'OrderItems': {
                'OrderItem': {
                    'QuantityOrdered': '1',
                    'Title': 'Delta Rothko Rolling Bicycle Stand',
                    'ConditionId': 'New',
                    'IsGift': 'false',
                    'ASIN': 'B00XXXXTIK',
                    'SellerSKU': '9934638',
                    'OrderItemId': '49624373726506',
                    'ProductInfo': {
                        'NumberOfItems': '1'
                    },
                    'QuantityShipped': '0',
                    'ConditionSubtypeId': 'New'
                }
            },
            'AmazonOrderId': '112-9XXXXXX-XXXXXXX'
        },
        'ResponseMetadata': {
            'RequestId': '8XXXXX8-0866-44a4-96f5-XXXXXXXXXXXX'
        }
    }
}
Any ideas?
Because you are iterating over each key of the inner dict:
{'QuantityOrdered': '1', 'Title': 'Delta Rothko Rolling Bicycle Stand', 'ConditionId': 'New', 'IsGift': 'false', 'ASIN': 'B00XXXXTIK', 'SellerSKU': '9934638', 'OrderItemId': '49624373726506', 'ProductInfo': {'NumberOfItems': '1'}, 'QuantityShipped': '0', 'ConditionSubtypeId': 'New'}
So the first value of item will be the string 'QuantityOrdered', and you are trying to index that string as if it were a dictionary.
You can just do:
item_list.append(data1['ListOrderItemsResponse']['ListOrderItemsResult']['OrderItems']['OrderItem']['SellerSKU'])
and avoid the for loop over the dictionary entirely.
I guess you are trying to iterate over the OrderItems and collect their SellerSKU values:
for item in data1['ListOrderItemsResponse']['ListOrderItemsResult']['OrderItems']:
    item_list.append(item['SellerSKU'])
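A more defensive sketch (not from either answer above, reusing data1 from the question): xmltodict collapses a single repeated element into a dict and only produces a list when there are several, so it helps to normalise OrderItem before looping:

order_item = data1['ListOrderItemsResponse']['ListOrderItemsResult']['OrderItems']['OrderItem']
if isinstance(order_item, dict):
    order_item = [order_item]  # a single item comes back as a dict, not a list

item_list = []
for item in order_item:
    item_list.append(item['SellerSKU'])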
I have a .js file with the following content:
AppSettings = {
    projectName: 'myproject',
    url: 'https://www.google.com',
    appKey: 'k2y-myproject_124439_18111',
    newsKey: '',
    version: moment().format('YYMMDD_HHmmss'),
    mixpanelToken: '08e97bef3930f330037d9z6t56395060'
};
which I would like to convert into a Python dictionary that I can access as follows:
>>> print(data['AppSettings']['url'])
https://www.google.com
What is the best way to achieve this?
Code
d = {'AppSettings': {}}
with open('tt.js', 'r') as f:
    next(f)  # skip the "AppSettings = {" line
    for line in f:
        splitLine = line.strip().replace(',', '').split(':')
        d['AppSettings'][splitLine[0]] = "".join(splitLine[1:]).strip()

d['AppSettings']['url'] = d['AppSettings']['url'].replace('https//', 'https://')
d['AppSettings'].pop("};", None)  # remove the entry created by the closing "};" line

print(d['AppSettings']['url'])
print(d['AppSettings']['newsKey'])
print(d['AppSettings']['appKey'])
print(d['AppSettings']['version'])
print(d['AppSettings']['mixpanelToken'])
Sample output
'https://www.google.com'
''
'k2y-myproject_124439_18111'
moment().format('YYMMDD_HHmmss')
'08e97bef3930f330037d9z6t56395060'
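An alternative sketch that avoids the string surgery above: match each key/value pair with a regular expression. This assumes the simple key: 'value' layout shown in the question and still leaves version as the raw moment() expression:

import re

# one "key: value," pair per line; the trailing comma is optional
pair_re = re.compile(r"^\s*(\w+)\s*:\s*(.*?),?\s*$")

d = {'AppSettings': {}}
with open('tt.js', 'r') as f:
    for line in f:
        line = line.strip()
        if line.endswith('{') or line.startswith('}'):
            continue  # skip "AppSettings = {" and the closing "};"
        m = pair_re.match(line)
        if m:
            key, value = m.group(1), m.group(2).strip("'")
            d['AppSettings'][key] = value

print(d['AppSettings']['url'])  # https://www.google.com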
I have some Python code (see below) which takes a JSON file with the structure:
{
    "name": "Winking Entertainment",
    "imports": "Translink Capital"
},
{
    "name": "Wochacha",
    "imports": "Sequoia Capital"
},
{
    "name": "Wuhan Kindstar Diagnostics",
    "imports": "Baird Venture Partners"
},
and aggregates repeated values in "imports", turning the matching strings into a single array for that entry (see the snippet below).
import json
from collections import defaultdict

def map_names_to_imports(raw_data):
    name_to_imports = defaultdict(list)
    for row in raw_data:
        name_to_imports[row['imports']].append(row['name'])
    return name_to_imports

def reformat(name_to_imports):
    output = []
    for name, imports in name_to_imports.items():
        new_dict = {
            'name': name,
            'imports': list(set(imports))
        }
        output.append(new_dict)
    return output

def run(raw_data):
    name_to_imports = map_names_to_imports(raw_data)
    output = reformat(name_to_imports)
    with open('clean-data2.json', 'wb') as f:
        f.write(json.dumps(output))

if __name__ == '__main__':
    raw_data = json.load(open('bricinvestors.json'))
    run(raw_data)
The issue I am having is that my JSON file is not coming out the right way. For some reason, name and imports are getting reversed, so my output looks like:
{"imports": ["SinoHub"], "name": "Iroquois Capital"}, {"imports": ["Qunar.com", "Lashou.com"], "name": "Tenaya Capital"}
I want to keep the {"name": "string", "imports": "string"} format -- and not the other way around.
What should I do?
Thanks.
If you're using Python 2.7+, you could have json.load() build collections.OrderedDict objects (via its object_pairs_hook argument) instead of standard dicts, and use OrderedDict in your own code too. The standard library dict class doesn't guarantee the ordering of keys.
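A minimal sketch of that idea, reading the question's bricinvestors.json:

import json
import collections

# json.load builds OrderedDicts instead of plain dicts, preserving key order
with open('bricinvestors.json') as f:
    raw_data = json.load(f, object_pairs_hook=collections.OrderedDict)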
Building on dano's answer, you could use the OrderedDict.setdefault method instead of using a defaultdict:
import json
import collections
OrderedDict = collections.OrderedDict

def map_names_to_imports(raw_data):
    name_to_imports = OrderedDict()
    for row in raw_data:
        name_to_imports.setdefault(row['imports'], []).append(row['name'])
    return name_to_imports

def reformat(name_to_imports):
    output = []
    for name, imports in name_to_imports.items():
        new_dict = OrderedDict([('name', name),
                                ('imports', list(set(imports)))])
        output.append(new_dict)
    return output

def run(raw_data):
    name_to_imports = map_names_to_imports(raw_data)
    output = reformat(name_to_imports)
    with open('clean-data2.json', 'wb') as f:
        f.write(json.dumps(output))

if __name__ == '__main__':
    raw_data = json.load(open('bricinvestors.json'),
                         object_pairs_hook=OrderedDict)
    run(raw_data)
Final version, which is based in large part on @unutbu's answer.
import json
import collections
OrderedDict = collections.OrderedDict

def map_names_to_imports(raw_data):
    name_to_imports = OrderedDict()
    for row in raw_data:
        name_to_imports.setdefault(row['imports'], []).append(row['name'])
    return name_to_imports

def reformat(name_to_imports):
    the_output = []
    for name, imports in name_to_imports.items():
        new_dict = OrderedDict([('name', name),
                                ('imports', list(set(imports)))])
        the_output.append(new_dict)
    return the_output

def run(raw_data):
    name_to_imports = map_names_to_imports(raw_data)
    the_output = reformat(name_to_imports)
    with open('data/clean-data2.json', 'w+', encoding='utf8') as f:
        f.write(json.dumps(the_output))

if __name__ == '__main__':
    raw_data = json.load(open('data/bricsinvestorsfirst.json'), object_pairs_hook=OrderedDict)
    run(raw_data)
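Side note: on Python 3.7 and later, plain dicts preserve insertion order, so the same program can be written without OrderedDict. A minimal sketch under that assumption:

import json

def run(raw_data):
    # plain dicts keep insertion order on Python 3.7+, so 'name' stays ahead of 'imports'
    name_to_imports = {}
    for row in raw_data:
        name_to_imports.setdefault(row['imports'], []).append(row['name'])
    the_output = [{'name': name, 'imports': list(set(imports))}
                  for name, imports in name_to_imports.items()]
    with open('data/clean-data2.json', 'w', encoding='utf8') as f:
        json.dump(the_output, f)

if __name__ == '__main__':
    with open('data/bricsinvestorsfirst.json') as f:
        run(json.load(f))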