How to remove comma parsing of strings in a CSV - python
I wrote the following code, and when I write each item from a list to a CSV file, every entry gets split into individual characters separated by commas.
Source Code:
import requests
import sys
import csv

bus = []
final = []

url = 'https://api.yelp.com/v3/businesses/search'
api_key = '**censored**'
headers = {
    'Authorization': 'Bearer ' + api_key
}
params = {
    'term': 'coffee',
    "location": sys.argv[1]
}

response = requests.get(url, headers=headers, params=params)
businesses = response.json()['businesses']

for business in businesses:
    bus.append(business['name'])

for item in bus:
    final.append(item)
final = []

print('The top 10 coffee shops are: ')
for item in bus[0:10]:
    print(item)

with open('coffee_shops.csv', 'w+') as file:
    writer = csv.writer(file)
    writer.writerow('name')
    for item in bus:
        writer.writerow(item)
CSV Output:
n,a,m,e
J,a,y, ,J,e,a,n, ,C,a,f,e
L,e,g,e,n,d,s, ,C,a,f,é
C,o,s,t,a, ,C,o,f,f,e,e
T,h,e, ,F,i,g, ,L,e,a,f
O,r,l,i, ,E,s,p,e,e,s,s,o, ,B,a,r, ,B,a,k,e,r,y
C,a,f,e, ,2,3
T,i,p,p,y,s, ,C,a,f,e
A,u,n,t, ,S,a,l,l,y, ,C,a,f,e
B,e,l,g,i,q,u,e
C,o,f,f,e,e,7
B,u,n,n,o, ,C,o,f,f,e,e
C,o,s,t,a, ,C,o,f,f,e,e
C,a,k,e,s, ,a,n,d, ,S,h,a,k,e,s
T,h,e, ,D,e,l,i,c,i,o,u,s, ,C,a,f,e, ,S,a,n,d,w,i,c,h, ,B,a,r
C,a,f,f,e, ,L,a,t,t,e, ,B,a,r,k,i,n,g
C,a,k,e,s, ,&, ,B,a,k,e,s
C,a,f,e, ,C,h,i,c,c,h,i
D,e,l,i,c,i,o,u,s, ,C,a,f,e
L,i,t,t,l,e, ,W,o,o,d,f,o,r,d, ,C,a,f,e
P,o,r,k,y,s
Is there a way to remove the commas?
Thanks
Amen
As far as I understand your issue, you have a list of strings named bus and you want to save it to a file, one name per line.
bus = ['A', 'B', 'C']

with open('out.txt', 'w') as f:
    f.write('name\n')
    for item in bus:
        f.write(item + '\n')
out.txt
name
A
B
C
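If you want to keep using the csv module instead, the underlying problem is that csv.writer.writerow() expects a sequence of fields, so passing a bare string makes it treat each character as a separate column. A minimal sketch of that fix, assuming the same bus list and output file name as in the question, is to wrap each name in a one-element list:

import csv

with open('coffee_shops.csv', 'w', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(['name'])     # header as a one-column row
    for item in bus:
        writer.writerow([item])   # wrap the string so it stays a single field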
Related
How to add sub item to json in for loop
I am trying to achieve the following json output: My current code:

#!/usr/bin/env python3.9
import json

complete_lst = []
url_lst = ['https://', 'https://', 'https://']

complete_lst.append({'title': 'Hello'})
for url in url_lst:
    complete_lst.append({'watch': {'Season1': {'url': url}}})

with open("Hello.json", "w") as file:
    json.dump(complete_lst, file)

the output json file looks like this: I want all the urls to be nested under the watch -> Season1 -> url key
Try this:

import json

complete_lst = []
url_lst = ['https://', 'https://', 'https://']

complete_lst.append({
    'title': 'Hello',
    'watch': {'Season1': {"url": []}}
})
for url in url_lst:
    complete_lst[0]["watch"]["Season1"]["url"].append(url)

print(complete_lst)

If your data is static, then just do this:

import json

complete_lst = [{
    'title': 'Hello',
    'watch': {'Season1': {"url": ['https://', 'https://', 'https://']}}
}]
print(complete_lst)
Another way of doing this would be to build a dictionary instead of a list:

#!/usr/bin/env python3.9
import json

url_lst = ['https://', 'https://', 'https://']

complete_list = {}
complete_list['title'] = "Hello"
complete_list['watch'] = {}
complete_list['watch']['Season1'] = {}
complete_list['watch']['Season1']['urls'] = []

for url in url_lst:
    complete_list['watch']['Season1']['urls'].append(url)

with open("Hello.json", "w") as file:
    json.dump(complete_list, file)

Note: here you don't need to access items by their indices and can use the keys directly.
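For reference, dumping the dictionary built in the second answer should produce a Hello.json along these lines (a sketch of the expected output, assuming the same three placeholder URLs):

{"title": "Hello", "watch": {"Season1": {"urls": ["https://", "https://", "https://"]}}}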
python parse from text/csv file to request.post api
I would like to turn the curl command below into a Python script. I need to load the device information from a text or CSV file, but I am having difficulties parsing the data from the file. Can someone help me understand this? Thank you.

curl -X POST -d '{"hostname":"localhost.localdomain","version":"v2c","community":"public"}' -H 'X-Auth-Token: my_api_token' https://xxx/api/v0/devices

My code:

import requests
import json
import csv

auth_token = "my_api_token"
api_url_base = 'http://xxxx/api/v0/devices'
headers = {'Content-Type': 'application/json',
           'Authorization': 'Bearer {0}'.format(auth_token)}

def add_device(name, filename):
    with open(nodes.csv, 'r') as f:
        add_device = f.readline()
    add_device = {'hostname': $name, 'version': %version, 'community': %community}
    response = requests.post(api_url_base, headers=headers, json=add_device)
    print(response.json())
You should split your CSV content. Each line holds three comma-separated fields, so split the line on commas and index the resulting items directly:

add_devices = []
with open('nodes.csv', 'r') as f:
    for line in f:
        items = line.strip().split(',')
        # items[0] = hostname, items[1] = version, items[2] = community
        add_devices.append({'hostname': items[0],
                            'version': items[1],
                            'community': items[2]})
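To tie this back to the original curl command, here is a minimal sketch that reads the file and posts one device per line. It assumes nodes.csv has a header row named hostname,version,community (an assumption, not stated in the question) and reuses the X-Auth-Token header from the curl example:

import csv
import requests

api_url_base = 'https://xxx/api/v0/devices'
headers = {'X-Auth-Token': 'my_api_token'}    # token header taken from the curl command

with open('nodes.csv', newline='') as f:
    for row in csv.DictReader(f):             # assumes a hostname,version,community header row
        payload = {'hostname': row['hostname'],
                   'version': row['version'],
                   'community': row['community']}
        response = requests.post(api_url_base, headers=headers, json=payload)
        print(response.status_code, response.json())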
PYTHON JSONtoDICT help needed - It appears python is interpreting my json-dictionary conversion as a list
The following code is giving me the error:

Traceback (most recent call last):
  File "AMZGetPendingOrders.py", line 66, in <module>
    item_list.append(item['SellerSKU'])
TypeError: string indices must be integers

The code:

from mws import mws
import time
import json
import xmltodict

access_key = 'xx'  # replace with your access key
seller_id = 'yy'   # replace with your seller id
secret_key = 'zz'  # replace with your secret key
marketplace_usa = '00'

orders_api = mws.Orders(access_key, secret_key, seller_id)
orders = orders_api.list_orders(marketplaceids=[marketplace_usa],
                                orderstatus=('Pending'),
                                fulfillment_channels=('MFN'),
                                created_after='2018-07-01')

#save as XML file
filename = 'c:order.xml'
with open(filename, 'w') as f:
    f.write(orders.original)

#ConvertXML to JSON
dictString = json.dumps(xmltodict.parse(orders.original))

#Write new JSON to file
with open("output.json", 'w') as f:
    f.write(dictString)

#Read JSON and parse our order number
with open('output.json', 'r') as jsonfile:
    data = json.load(jsonfile)

#initialize blank dictionary
id_list = []
for order in data['ListOrdersResponse']['ListOrdersResult']['Orders']['Order']:
    id_list.append(order['AmazonOrderId'])

#This "gets" the orderitem info - this code actually is similar to the initial Amazon "get" though it has fewer switches
orders_api = mws.Orders(access_key, secret_key, seller_id)

#opens and empties the orderitem.xml file
open('c:orderitem.xml', 'w').close()

#iterates through the list of AmazonOrderIds and writes the item information to orderitem.xml
for x in id_list:
    orders = orders_api.list_order_items(amazon_order_id=x)
    filename = 'c:orderitem.xml'
    with open(filename, 'a') as f:
        f.write(orders.original)

#ConvertXML to JSON
amz_items_pending = json.dumps(xmltodict.parse(orders.original))

#Write new JSON to file
with open("pending.json", 'w') as f:
    f.write(amz_items_pending)

#read JSON and parse item_no and qty
with open('pending.json', 'r') as jsonfile1:
    data1 = json.load(jsonfile1)

#initialize blank dictionary
item_list = []
for item in data1['ListOrderItemsResponse']['ListOrderItemsResult']['OrderItems']['OrderItem']:
    item_list.append(item['SellerSKU'])
    #print(item)

#print(id_list)
#print(data1)
#print(item_list)

time.sleep(10)

I don't understand why Python thinks this is a list and not a dictionary. When I print id_list it looks like a dictionary (curly braces, single quotes, colons, etc). print(data1) shows my dictionary:

{'ListOrderItemsResponse': {'#xmlns': 'https://mws.amazonservices.com/Orders/2013-09-01',
 'ListOrderItemsResult': {'OrderItems': {'OrderItem': {'QuantityOrdered': '1',
   'Title': 'Delta Rothko Rolling Bicycle Stand', 'ConditionId': 'New',
   'IsGift': 'false', 'ASIN': 'B00XXXXTIK', 'SellerSKU': '9934638',
   'OrderItemId': '49624373726506', 'ProductInfo': {'NumberOfItems': '1'},
   'QuantityShipped': '0', 'ConditionSubtypeId': 'New'}},
  'AmazonOrderId': '112-9XXXXXX-XXXXXXX'},
 'ResponseMetadata': {'RequestId': '8XXXXX8-0866-44a4-96f5-XXXXXXXXXXXX'}}}

Any ideas?
Because you are iterating over each key in the dict:

{'QuantityOrdered': '1', 'Title': 'Delta Rothko Rolling Bicycle Stand', 'ConditionId': 'New',
 'IsGift': 'false', 'ASIN': 'B00XXXXTIK', 'SellerSKU': '9934638', 'OrderItemId': '49624373726506',
 'ProductInfo': {'NumberOfItems': '1'}, 'QuantityShipped': '0', 'ConditionSubtypeId': 'New'}

the first value of item will be 'QuantityOrdered', and you are trying to index that string as if it were a dictionary. You can just do:

item_list.append(data1['ListOrderItemsResponse']['ListOrderItemsResult']['OrderItems']['OrderItem']['SellerSKU'])

and avoid the for loop over the dictionary.
I guess you are trying to iterate over the order items and collect their SellerSKU values:

for item in data1['ListOrderItemsResponse']['ListOrderItemsResult']['OrderItems']:
    item_list.append(item['SellerSKU'])
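A related gotcha worth noting here (a sketch, not part of either answer): xmltodict returns a single dict when the XML contains one OrderItem and a list when it contains several, so code that loops over the parsed result usually needs to normalize the shape first. Assuming data1 is loaded as in the question:

order_items = data1['ListOrderItemsResponse']['ListOrderItemsResult']['OrderItems']['OrderItem']
if isinstance(order_items, dict):   # single item: wrap it so the loop works either way
    order_items = [order_items]

item_list = []
for item in order_items:
    item_list.append(item['SellerSKU'])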
Python : Normalize Json response (array)
I am new to JSON and Python, and I am trying to achieve the below. I need to parse this JSON:

{
  "id": "12345abc",
  "codes": [
    "BSVN1FKW3JKKNNMN",
    "HJYYUKJJL999OJR",
    "DFTTHJJJJ0099JUU",
    "FGUUKHKJHJGJJYGJ"
  ],
  "ctr": {
    "source": "xyz",
    "user_id": "1234"
  }
}

Expected output, normalized on the "codes" value:

ID~CODES~USER_ID
12345abc~BSVN1FKW3JKKNNMN~1234
12345abc~HJYYUKJJL999OJR~1234
12345abc~DFTTHJJJJ0099JUU~1234
12345abc~FGUUKHKJHJGJJYGJ~1234

I started with the code below, but need help to get to my desired output. The "codes" block can have any number of values. The code below throws "TypeError: string indices must be integers".

#!/usr/bin/python
import os
import json
import csv

f = open('rspns.csv', 'w')
writer = csv.writer(f, delimiter='~')
headers = ['ID', 'CODES', 'USER_ID']
default = ''
writer.writerow(headers)

string = open('sample.json').read().decode('utf-8')
json_obj = json.loads(string)

#print json_obj['id']
#print json_obj['codes']
#print json_obj['codes'][0]
#print json_obj['codes'][1]
#print json_obj['codes'][2]
#print json_obj['codes'][3]
#print json_obj['ctr']['user_id']

for keyword in json_obj:
    row = []
    row.append(str(keyword['id']))
    row.append(str(keyword['codes']))
    row.append(str(keyword['ctr']['user_id']))
    writer.writerow(row)
If your json_obj looks exactly like that, that is, it is a dictionary, then the issue is that when you do for keyword in json_obj: you are iterating over the keys of json_obj, so trying to access ['id'] on a key errors out with "string indices must be integers". You should get the id and user_id before looping, then loop over json_obj['codes'] and write the previously fetched id and user_id along with the current code to the CSV writer as a row. Example:

import json
import csv

string = open('sample.json').read().decode('utf-8')
json_obj = json.loads(string)

with open('rspns.csv', 'w') as f:
    writer = csv.writer(f, delimiter='~')
    headers = ['ID', 'CODES', 'USER_ID']
    writer.writerow(headers)
    id = json_obj['id']
    user_id = json_obj['ctr']['user_id']
    for code in json_obj['codes']:
        writer.writerow([id, code, user_id])
You don't want to iterate through json_obj, as that is a dictionary and iterating through it yields the keys. The TypeError is caused by trying to index into the keys ('id', 'codes', and 'ctr') -- which are strings -- as if they were dictionaries. Instead, you want a separate row for each code in json_obj['codes'], using the json_obj dictionary for your lookups:

for code in json_obj['codes']:
    row = []
    row.append(json_obj['id'])
    row.append(code)
    row.append(json_obj['ctr']['user_id'])
    writer.writerow(row)
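The question and the answers read the file Python 2 style (.read().decode('utf-8') only works on bytes there). A minimal Python 3 sketch of the same approach, assuming the same sample.json and rspns.csv file names, would be:

import json
import csv

with open('sample.json', encoding='utf-8') as src:
    json_obj = json.load(src)          # json.load reads the file object directly

with open('rspns.csv', 'w', newline='') as f:
    writer = csv.writer(f, delimiter='~')
    writer.writerow(['ID', 'CODES', 'USER_ID'])
    for code in json_obj['codes']:
        writer.writerow([json_obj['id'], code, json_obj['ctr']['user_id']])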
Python 27 CSV to JSON POST
I'm transferring my movie ratings from IMDB to Trakt. I use a Python script to do so and can't get it to turn my list into serializable JSON. My script consists of a JSON uploader and a CSV reader, both of which work fine separately. I've looked into list vs. tuple, json.dumps options and syntax, and into json.encoder. There is a lot on the topic available online but no complete CSV-to-JSON example. The following script includes all steps and a few lines of example data. If you want to test this script, you need the username, pass-SHA1 and API key of your Trakt account.

Current error:

raise TypeError(repr(o) + " is not JSON serializable")
TypeError: set(['["tt1535108", "Elysium", "8", "2013"]']) is not JSON serializable

#===============================================================================
# Used CSV file (imdb_ratings.csv)
#===============================================================================
# position,const,created,modified,description,Title,Title type,Directors,You rated,IMDb Rating,Runtime (mins),Year,Genres,Num. Votes,Release Date (month/day/year),URL
# 1,tt1683526,Sat Feb 1 00:00:00 2014,,,Detachment,Feature Film,Tony Kaye,8,7.7,97,2011,drama,36556,2011-04-25,http://www.imdb.com/title/tt1683526/
# 2,tt1205537,Wed Jan 29 00:00:00 2014,,,Jack Ryan: Shadow Recruit,Feature Film,Kenneth Branagh,6,6.6,105,2014,"action, mystery, thriller",11500,2014-01-15,http://www.imdb.com/title/tt1205537/
# 3,tt1535108,Tue Jan 28 00:00:00 2014,,,Elysium,Feature Film,Neill Blomkamp,8,6.7,109,2013,"action, drama, sci_fi, thriller",176354,2013-08-07,http://www.imdb.com/title/tt1535108/

#===============================================================================
# Imports etc.
#===============================================================================
import csv
import json
import urllib2

ifile = open('imdb_ratings.csv', "rb")
reader = csv.reader(ifile)
included_cols = [1, 5, 8, 11]

#===============================================================================
# CSV to JSON
#===============================================================================
rownum = 0
for row in reader:
    # Save header row.
    if rownum == 0:
        header = row
    else:
        content = list(row[i] for i in included_cols)
        print(content)
    rownum += 1
ifile.close()

#===============================================================================
# POST of JSON
#===============================================================================
data = {
    "username": "<username>",
    "password": "<SHA1>",
    "movies": [
        {
            # Expected format:
            # "imdb_id": "tt0114746",
            # "title": "Twelve Monkeys",
            # "year": 1995,
            # "rating": 9
            json.dumps(content)
        }
    ]
}

req = urllib2.Request('http://api.trakt.tv/rate/movies/<api>')
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req, json.dumps(data))
Construct the dict:

{
    "imdb_id": "tt0114746",
    "title": "Twelve Monkeys",
    "year": 1995,
    "rating": 9
}

instead of calling json.dumps(content), which creates a string. You could create the list of dicts using a list comprehension and a dict comprehension:

movies = [{field: row[i] for field, i in zip(fields, included_cols)}
          for row in reader]

Full script:

import csv
import json
import urllib2

with open('imdb_ratings.csv', "rb") as ifile:
    reader = csv.reader(ifile)
    next(reader)  # skip header row
    included_cols = [1, 5, 8, 11]
    fields = ['imdb_id', 'title', 'rating', 'year']
    movies = [{field: row[i] for field, i in zip(fields, included_cols)}
              for row in reader]

data = {"username": "<username>",
        "password": "<SHA1>",
        "movies": movies}

req = urllib2.Request('http://api.trakt.tv/rate/movies/<api>')
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req, json.dumps(data))
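One follow-up worth noting (an assumption about the API, not something stated in the answer): csv.reader yields strings, so year and rating in each dict will be '2013' and '8' rather than numbers. If the Trakt endpoint expects integers, the comprehension can cast them, reusing reader and included_cols from the script above:

fields = ['imdb_id', 'title', 'rating', 'year']
int_fields = {'rating', 'year'}  # hypothetical: cast these if the API wants numbers

movies = [{field: (int(row[i]) if field in int_fields else row[i])
           for field, i in zip(fields, included_cols)}
          for row in reader]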