I've been trying to make an API call to get all the users from App Store Connect.
After using this script I only get a limited number of results:
import json
from appstoreconnectapi import AppStoreConnect
ISSUER_ID = "..."
KEY_ID = "..."
asc = AppStoreConnect(KEY_ID, "./key.p8", ISSUER_ID)
res = asc.fetch(uri='/v1/users', method='get', post_data=None)
print(res)
with open('output.json', 'w') as out:
    out.write(json.dumps(res, indent=4))
After 50 results are returned, this is what I get at the end of my JSON file:
"meta": {
"paging": {
"total": 356,
"limit": 50
}
}
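The `meta.paging` block says there are 356 users in total but only 50 per page, so the remaining pages have to be fetched explicitly. Below is a minimal sketch of paging through `/v1/users`; it assumes this wrapper's `fetch()` passes a query string through unchanged and will also accept the absolute `links.next` URL that the App Store Connect API returns (both are assumptions about this particular library):
import json
from appstoreconnectapi import AppStoreConnect

ISSUER_ID = "..."
KEY_ID = "..."
asc = AppStoreConnect(KEY_ID, "./key.p8", ISSUER_ID)

all_users = []
uri = '/v1/users?limit=200'  # ask for a bigger page; the API caps the page size (commonly 200)
while uri:
    res = asc.fetch(uri=uri, method='get', post_data=None)
    all_users.extend(res.get('data', []))
    # The API returns a cursor URL for the next page; stop when it is absent.
    uri = res.get('links', {}).get('next')

with open('output.json', 'w') as out:
    out.write(json.dumps(all_users, indent=4))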
I am new to Python and the REST world.
My Python script
import json
import requests

with open(r"create-multiple-Users.json", "r") as payload:
    data = json.load(payload)

json_data = json.dumps(data, indent=2)
headers = {'content-type': 'application/json; charset=utf-8'}

for i in range(len(data)):
    r = requests.post('http://localhost:3000/users',
                      data=json_data, headers=headers)
Mock API server: https://github.com/typicode/json-server.
Entry file: "info.json", with endpoint /users that initially has one user:
{
    "users": [
        {
            "id": 1,
            "name": "John",
            "job": "Wong"
        }
    ]
}
Issue:
POSTing from a file with only one user works perfectly: the new user is appended to info.json as an object, as expected.
But when I try to POST, say, 3 users from the file "create-multiple-Users.json" below, the users are appended to "info.json" as a list of objects 3 times (i.e. once per object/iteration).
[
    {
        "id": 10,
        "name": "Janet",
        "job": "Weaver"
    },
    {
        "id": 12,
        "name": "Kwonn",
        "job": "Wingtsel"
    },
    {
        "id": 13,
        "name": "Eve",
        "job": "Holt"
    }
]
I would expect the users to be appended one by one as separate objects.
Maybe I am oversimplifying the looping?
Any help is highly appreciated.
PS: Sorry I couldn't get the multiple-users file formatted ;(
A simple change to your for loop would help:
import json
import requests

with open(r"create-multiple-Users.json", "r") as payload:
    data = json.load(payload)

headers = {'content-type': 'application/json; charset=utf-8'}

for row in data:  # Change this to iterate over the JSON list
    r = requests.post('http://localhost:3000/users',
                      data=json.dumps(row), headers=headers)  # Send one serialized object per request
I found the solution thanks to the hint from "enriqueojedalara":
import json
import requests

with open(r"create-multiple-Users.json", "r") as payload:
    data = json.load(payload)  # <class 'list'>

headers = {'content-type': 'application/json; charset=utf-8'}
print("Total number of objects: ", len(data))

for i in range(len(data)):
    data_new = json.dumps(data[i])
    r = requests.post('http://localhost:3000/users', data=data_new, headers=headers)
    print("Item#", i, "added", " -> ", data_new)
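As a side note, `requests` can serialize each object and set the `Content-Type` header itself via its `json=` parameter, which makes the per-item `json.dumps` and the explicit header unnecessary; a minimal sketch of the same loop:
import json
import requests

with open(r"create-multiple-Users.json", "r") as payload:
    data = json.load(payload)

for i, user in enumerate(data):
    # json= serializes the dict and sets Content-Type: application/json
    r = requests.post('http://localhost:3000/users', json=user)
    print("Item#", i, "added ->", user)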
I am trying to get my list of contacts from my WIX website using their API endpoint URL and the requests module in Python. I am totally stuck.
Here's my code so far:
import requests

auth_key = "my auth key"
r = requests.get("https://www.wixapis.com/crm/v1/contacts", headers={"Authorization": auth_key})
print(r.status_code)
dict = r.json()
contacts_list = dict["contacts"]
for i in contacts_list:
    for key in i:
        print(key, ':', i[key])
Here is what I get:
200
id : long id string 1
emails : [{'tag': 'UNTAGGED', 'email': 'sampleemail1@yahoo.com'}]
phones : []
addresses : [{'tag': 'UNTAGGED', 'countryCode': 'US'}]
metadata : {'createdAt': '2020-07-08T22:41:07.135Z', 'updatedAt': '2020-07-08T22:42:19.327Z'}
source : {'sourceType': 'SITE_MEMBERS'}
id : long id string 2
emails : [{'tag': 'UNTAGGED', 'email': 'sampleemail2@yahoo.com'}]
phones : []
addresses : []
metadata : {'createdAt': '2020-07-03T00:51:21.127Z', 'updatedAt': '2020-07-04T03:26:16.370Z'}
source : {'sourceType': 'SITE_MEMBERS'}
Process finished with exit code 0
Each line is a string. I need each row of the CSV to be a new contact (there are two sample contacts). The columns should be the keys. I plan to use the csv module's writerow(fields), where fields is a list of strings (keys) such as fields = [id, emails, phones, addresses, metadata, source].
All I really need is the emails in a single column of a CSV, though. Is there a way to maybe just get the email for each contact?
A CSV file with one column is basically just a text file with one item per line, but you can use the csv module to do it if you really want, as shown below.
I commented-out the 'python-requests' stuff and used some sample input for testing.
test_data = {
    "contacts": [
        {
            "id": "long id string 1",
            "emails": [
                {
                    "tag": "UNTAGGED",
                    "email": "sampleemail1@yahoo.com"
                }
            ],
            "phones": [],
            "addresses": [
                {
                    "tag": "UNTAGGED",
                    "countryCode": "US"
                }
            ],
            "metadata": {
                "createdAt": "2020-07-08T22:41:07.135Z",
                "updatedAt": "2020-07-08T22:42:19.327Z"
            },
            "source": {
                "sourceType": "SITE_MEMBERS"
            }
        },
        {
            "id": "long id string 2",
            "emails": [
                {
                    "tag": "UNTAGGED",
                    "email": "sampleemail2@yahoo.com"
                }
            ],
            "phones": [],
            "addresses": [],
            "metadata": {
                "createdAt": "2020-07-03T00:51:21.127Z",
                "updatedAt": "2020-07-04T03:26:16.370Z"
            },
            "source": {
                "sourceType": "SITE_MEMBERS"
            }
        }
    ]
}
import csv
import json
import requests

auth_key = "my auth key"
output_filename = 'whatever.csv'

#r = requests.get("https://www.wixapis.com/crm/v1/contacts", headers={"Authorization": auth_key})
#print(r.status_code)
#json_obj = r.json()
json_obj = test_data  # FOR TESTING PURPOSES
contacts_list = json_obj["contacts"]

with open(output_filename, 'w', newline='') as outp:
    writer = csv.writer(outp)
    writer.writerow(['email'])  # Write csv header.
    for contact in contacts_list:
        email = contact['emails'][0]['email']  # Get the first one.
        writer.writerow([email])

print('email csv file written')
Contents of whatever.csv file afterwards:
email
sampleemail1@yahoo.com
sampleemail2@yahoo.com
Update:
As pointed out by @martineau, I just saw that some values can be arrays with several items, so you need to cater for that. You could turn them into strings with ', '.join() in the for loop.
You can write it to CSV like this using the csv package:
import csv, json, sys
import requests

auth_key = "my auth key"
r = requests.get("https://www.wixapis.com/crm/v1/contacts", headers={"Authorization": auth_key})
print(r.status_code)
dict = r.json()
contacts_list = dict["contacts"]

output = csv.writer(sys.stdout)
# insert header (keys)
output.writerow(contacts_list[0].keys())
for i in contacts_list:
    output.writerow(i.values())
At the end you can print and verify the output.
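Building on the update above, here is a minimal sketch of joining each contact's email addresses into a single cell before writing the row, so list-valued fields don't end up as raw Python reprs in the CSV; the tiny inline sample just stands in for r.json()["contacts"]:
import csv
import sys

# In the real script this would be r.json()["contacts"]; a tiny sample here.
contacts_list = [
    {"id": "long id string 1",
     "emails": [{"tag": "UNTAGGED", "email": "sampleemail1@yahoo.com"}]},
    {"id": "long id string 2",
     "emails": [{"tag": "UNTAGGED", "email": "sampleemail2@yahoo.com"}]},
]

output = csv.writer(sys.stdout)
output.writerow(['id', 'emails'])  # header
for contact in contacts_list:
    # Join every email address for the contact into one cell.
    emails = ', '.join(e['email'] for e in contact.get('emails', []))
    output.writerow([contact['id'], emails])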
I've looked through many responses to variants of this problem but am still not able to get my code working.
I am trying to use the MS Azure Text Analytics service, and when I paste the example code (including 2 or 3 sample sentences) it works as you might expect. However, my use case requires the same analysis to be performed on hundreds of free-text survey responses, so rather than pasting in each and every sentence, I would like to use a JSON file containing these responses as the input, pass that to Azure for analysis, and receive back a JSON output.
The code I am using and the response it yields are shown below (note that the last bit of the ID 2 response has been chopped off before the error message).
key = "xxxxxxxxxxx"
endpoint = "https://blablabla.cognitiveservices.azure.com/"

import json
with open(r'example.json', encoding='Latin-1') as f:
    data = json.load(f)
print(data)

import os
from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
from msrest.authentication import CognitiveServicesCredentials

def authenticateClient():
    credentials = CognitiveServicesCredentials(key)
    text_analytics_client = TextAnalyticsClient(
        endpoint=endpoint, credentials=credentials)
    return text_analytics_client

import requests
# pprint is used to format the JSON response
from pprint import pprint
import os

subscription_key = "xxxxxxxxxxxxx"
endpoint = "https://blablabla.cognitiveservices.azure.com/"
entities_url = "https://blablabla.cognitiveservices.azure.com/text/analytics/v2.1/entities/"

documents = data
headers = {"Ocp-Apim-Subscription-Key": subscription_key}
response = requests.post(entities_url, headers=headers, json=documents)
entities = response.json()
pprint(entities)
[{'ID': 1, 'text': 'dog ate my homework', {'ID': 2, 'text': 'cat sat on the
{'code': 'BadRequest',
'innerError': {'code': 'InvalidRequestBodyFormat',
'message': 'Request body format is wrong. Make sure the json '
'request is serialized correctly and there are no '
'null members.'},
'message': 'Invalid request'}
According to my research, when we call the Azure Text Analytics REST API to identify entities, the request body should look like:
{
    "documents": [
        {
            "id": "1",
            "text": "."
        },
        {
            "id": "2",
            "text": ""
        }
    ]
}
For example
My JSON file:
[{
        "id": "1",
        "text": "dog ate my homework"
    }, {
        "id": "2",
        "text": "cat sat on the sofa"
    }
]
My code:
key = ''
endpoint = "https://<>.cognitiveservices.azure.com/"

import requests
from pprint import pprint
import os
import json

with open(r'd:\data.json', encoding='Latin-1') as f:
    data = json.load(f)
pprint(data)

entities_url = endpoint + "/text/analytics/v2.1/entities?showStats=true"
headers = {"Ocp-Apim-Subscription-Key": key}

documents = data
response = requests.post(entities_url, headers=headers, json=documents)
entities = response.json()
pprint(entities)

pprint("--------------------------------")

documents = {}
documents["documents"] = data
response = requests.post(entities_url, headers=headers, json=documents)
entities = response.json()
pprint(entities)
I'm trying to write a Python script that will extract, as one large .json file, the (as of this writing) 105,445 WWI serviceperson records from api.aucklandmuseum.com, as shown in this SPARQL query.
So far I have this code:
import requests
import json

url = "http://api.aucklandmuseum.com/search/cenotaph/_search"
numfrom = 0
i = 0

while numfrom <= 105500:
    print("From:", numfrom)
    payload = "{\"sort\":[{\"dc_identifier\":{\"order\":\"asc\"}}],\"from\":"
    payload += (str(numfrom))
    payload += ",\"size\":500,\"query\":{\"match\":{\"am_war\":\"World War I, 1914-1918\"}}}"
    headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
    response = requests.request("POST", url, headers=headers, data=payload)
    outputjson = response.text
    outfilename = "OC"
    outfilename += str(i)
    outfilename += ".json"
    print("Start of", outfilename, ":\n", outputjson[:175])  # just first 175 chars
    with open(outfilename, mode='w+') as outfile:
        json.dump(outputjson, outfile)
    i += 1
    numfrom += 500
This gets 500 records at a time and dumps them to files, printing to the console the head of each file to test that the results are as expected.
I need to combine all this JSON into one big file, but several issues arise:
The JSON output is structured like this:
{
    "took": 4,
    "timed_out": false,
    "_shards": {
        "total": 2,
        "successful": 2,
        "failed": 0
    },
    "hits": {
        "total": 104767,
        "max_score": null,
        "hits": [
            {
                "_index": "cenotaph-2019-06-18",
                "_type": "am:MilitaryPerson",
                "_id": "http://api.aucklandmuseum.com/id/person/C53694",
                "_score": null,
                "_source": {
                    "am_medicalInformation": [
                        {
                            "am_medical": [
                                "Other/WWI"
                            ],
                            "am_record_score": [
                                "0"
                            ],
                            "am_notes": [
                                "Nature of Injury: G.S.W. right foot\nPension: 10/-\nPercent of disability: [Not stated]\nSource: New Zealand. Army (1920). List of the names of all ex-members of the New Zealand Expeditionary Force, suffering permanent disability from 20% to 100%"
                            ]
                        }
                    ],
                    […]
How can I write code (separate from the above would be fine) to combine all the JSON output (while retaining the _id of each record, along with its _source data), then "flatten" that data and transform it into a .csv?
I hope the above is clear.
Thank you
Hugh
p.s.: the server is running ElasticSearch v1.5.
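For what it's worth, a minimal sketch of one way to combine the saved pages and flatten them into a CSV with pandas, assuming the OC*.json files written by the script above; the output filename is just an example. Note that the script json.dump()s response.text, so each file actually holds a JSON-encoded string that has to be parsed a second time:
import glob
import json

import pandas as pd

rows = []
for path in sorted(glob.glob("OC*.json")):
    with open(path) as f:
        raw = json.load(f)
    # The download script json.dump()ed response.text, so each file holds
    # a JSON-encoded string; parse it a second time in that case.
    page = json.loads(raw) if isinstance(raw, str) else raw
    for hit in page["hits"]["hits"]:
        record = {"_id": hit["_id"]}
        record.update(hit.get("_source", {}))
        rows.append(record)

# json_normalize flattens nested dicts into dotted columns; fields that are
# lists (e.g. am_medicalInformation) stay as lists in a single cell.
df = pd.json_normalize(rows)
df.to_csv("cenotaph_ww1.csv", index=False)  # example output filename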
*** I have updated the code at the bottom.
I have a JSON object I'm working with, and it's coming from Azure analytics for an application we built. I'm trying to figure out how to parse the URL that comes back so that just the limit and location keys end up in separate columns. The code I'm using is listed here (keys are taken out, as well as the URL, because of API keys and tokens):
import requests
import pandas as pd
from urllib.parse import urlparse
from furl import furl
import json

d1 = '<This is where I have the rule for the API>'
querystring = {"timespan": "P7D"}  # gets the last 7 days
headers = {}  # stuff in here for headers

response = requests.request("GET", d1, headers=headers, params=querystring)
data = json.loads(response.text)

# then I clean up the stuff in the dataframe
for stuff in data['value']:
    del stuff['count']  # ... (just a list of all the non-needed fields in the JSON)

newstuff = json.dumps(data, indent=2, sort_keys=True)
data2 = json.loads(newstuff)
OK, now here is the part I am having problems with. I want to pull out 3 columns of data from each row: ['request']['url'], ['timestamp'], and ['user']['id'].
I'm pretty sure I need to use a for loop, so I'm doing the following to get the pieces out:
for x in data2['value']:
    time = x['timestamp']
    user = x['user']['id']
    url = furl(x['request']['url'])
    limit = url.args['limit']
    location = url.args['location']
What's happening is that when I try this, I'm getting "limit does not exist" for every URL. I think I have to do an if/else statement but am not sure how to formulate it. I need to get everything into a dataframe so I can parse it out into a cursor.execute statement, which I know how to do.
What's needed:
1. Get the information in the for loop into a dataframe.
2. Take the URL and, if it does not have a limit or a location, make the value none; otherwise put limit in a column, and likewise location in a column by itself.
Dataframe would look like this
timestamp user limit location
2018-01-01 bob@home.com null
2018-01-01 bill@home.com null
2018-01-01 same@home.com null null
2018-01-02 bob@home.com
Here is the information on furl.
Here is some sample JSON to test with:
{
    "value": [{
            "request": {
                "url": "https://website/testing"
            },
            "timestamp": "2018-09-23T18:32:58.153z",
            "user": {
                "id": ""
            }
        },
        {
            "request": {
                "url": "https://website/testing/limit?location=31737863-c431-e6611-9420-90b11c44c42f"
            },
            "timestamp": "2018-09-23T18:32:58.153z",
            "user": {
                "id": "steve@home.com"
            }
        },
        {
            "request": {
                "url": "https://website/testing/dealanalyzer?limit=57bd5872-3f45-42cf-bc32-72ec21c3b989&location=31737863-c431-e611-9420-90b11c44c42f"
            },
            "timestamp": "2018-09-23T18:32:58.153z",
            "user": {
                "id": "tom@home.com"
            }
        }
    ]
}
import requests
import pandas as pd
from urllib.parse import urlparse
import json
from pandas.io.json import json_normalize

d1 = "https://nowebsite/v1/apps/11111111-2222-2222-2222-33333333333333/events/requests"
querystring = {"timespan": "P7D"}
headers = {
    'x-api-key': "xxxxxxxxxxxxxxxxxxxxxxxx",
    'Cache-Control': "no-cache",
    'Postman-Token': "xxxxxxxxxxxxxxxxxxxx"
}

response = requests.request("GET", d1, headers=headers, params=querystring)
data = json.loads(response.text)

# delete crap out of API GET Request
for stuff in data['value']:
    del stuff['count']
    del stuff['customDimensions']
    del stuff['operation']
    del stuff['session']
    del stuff['cloud']
    del stuff['ai']
    del stuff['application']
    del stuff['client']
    del stuff['id']
    del stuff['type']
    del stuff['customMeasurements']
    del stuff['user']['authenticatedId']
    del stuff['user']['accountId']
    del stuff['request']['name']
    del stuff['request']['success']
    del stuff['request']['duration']
    del stuff['request']['performanceBucket']
    del stuff['request']['resultCode']
    del stuff['request']['source']
    del stuff['request']['id']

newstuff = json.dumps(data, indent=2, sort_keys=True)
#print(newstuff)

# Now it's in a cleaner format to work with
data2 = json.loads(newstuff)
json_normalize(data2['value'])
From here the data is in a pandas dataframe and looks like I want it to.
I just need to know how to use furl to pull the limit and location out of the URL on a per-row basis and create new columns called limit and location, as mentioned above.
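A minimal sketch of that last step, assuming furl's args mapping returns None from .get() when a query parameter is absent; the sample rows below just mimic the shape of data2['value'] after the cleanup above:
import pandas as pd
from furl import furl

# Sample rows shaped like data2['value'] after the cleanup above.
rows = [
    {"request": {"url": "https://website/testing"},
     "timestamp": "2018-09-23T18:32:58.153z",
     "user": {"id": ""}},
    {"request": {"url": "https://website/testing/dealanalyzer"
                        "?limit=57bd5872-3f45-42cf-bc32-72ec21c3b989"
                        "&location=31737863-c431-e611-9420-90b11c44c42f"},
     "timestamp": "2018-09-23T18:32:58.153z",
     "user": {"id": "tom@home.com"}},
]

records = []
for x in rows:
    args = furl(x['request']['url']).args
    records.append({
        'timestamp': x['timestamp'],
        'user': x['user']['id'],
        # .get() returns None when the query parameter is missing,
        # so no if/else is needed for URLs without limit/location.
        'limit': args.get('limit'),
        'location': args.get('location'),
    })

df = pd.DataFrame(records, columns=['timestamp', 'user', 'limit', 'location'])
print(df)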