I am trying to loop through a CSV and run a web request for each row, but I can't get it past the first row of the CSV.
The CSV is being exported from Mac Excel as a list of 10 items in 10 rows / 1 column.
import csv
import json
import requests
from django.http import HttpResponse
from django.template import Context
from django.template.loader import get_template

def AddTokens(request):
    tokenList = []
    output = 0
    apikey = "12345"
    restkey = "12345"
    URL = "https://api.web.com/1/install/"
    headers = {'content-type': 'application/json', 'X-web-Application-Id': apikey, 'X-web-REST-API-Key': restkey}
    with open('/Users/name/Desktop/test.csv', 'rU') as csvfile:
        deviceTokens = csv.reader(csvfile, delimiter=',')
        for token in deviceTokens:
            deviceToken = token[0].replace("/", "")
            deviceType = "ios"
            pushToken = "pushtoken_" + deviceToken
            payload = {"deviceType": deviceType, "deviceToken": deviceToken, "channels": ["", pushToken]}
            r = requests.post(URL, data=json.dumps(payload), headers=headers)
            t = get_template('addpush.html')
            html = t.render(Context({'output': output, 'tokenList': tokenList, 'deviceTokens': deviceTokens, 'token': token}))
            return HttpResponse(html)
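The likely culprit is that the return HttpResponse(html) sits inside the for loop, so the view returns after processing the first row. A minimal sketch of the fix, assuming the template only needs the collected tokens: keep the POST inside the loop, and render and return once after it.

    with open('/Users/name/Desktop/test.csv', 'rU') as csvfile:
        deviceTokens = csv.reader(csvfile, delimiter=',')
        for token in deviceTokens:
            deviceToken = token[0].replace("/", "")
            payload = {"deviceType": "ios", "deviceToken": deviceToken,
                       "channels": ["", "pushtoken_" + deviceToken]}
            r = requests.post(URL, data=json.dumps(payload), headers=headers)
            tokenList.append(deviceToken)  # track what was sent for the template
    t = get_template('addpush.html')
    html = t.render(Context({'output': output, 'tokenList': tokenList}))
    return HttpResponse(html)  # outside the loop, so every row is processed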
How do I make the working code below iterate over data["@odata.nextLink"] and append each page's data["value"] to the sample.json file?
import requests
import json
import datetime

def get_data():
    bearerAccessToken = '*************'
    now = datetime.datetime.now() - datetime.timedelta(days=10)
    dt_string = now.strftime("%Y-%m-%dT%H:%M:%S-04:00")
    print(dt_string)
    resourceUrl = "https://retsapi.raprets.com/CIN/RESO/OData/Property?Class=Residential&$count=true"
    query_params = {"$filter": "ModificationTimestamp ge " + dt_string}
    print(query_params)
    r = requests.get(resourceUrl, params=query_params, headers={'Authorization': 'Bearer ' + bearerAccessToken})
    data = r.json()
    with open("sample.json", "w") as outfile:
        json.dump(data["value"], outfile)
    print(data["@odata.nextLink"])

get_data()
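A minimal sketch of one way to follow the paging links, assuming the service keeps returning an "@odata.nextLink" key until the last page (the key name and response shape are taken from the question's own output):

    def get_all_data():
        bearerAccessToken = '*************'
        headers = {'Authorization': 'Bearer ' + bearerAccessToken}
        now = datetime.datetime.now() - datetime.timedelta(days=10)
        dt_string = now.strftime("%Y-%m-%dT%H:%M:%S-04:00")
        url = "https://retsapi.raprets.com/CIN/RESO/OData/Property?Class=Residential&$count=true"
        params = {"$filter": "ModificationTimestamp ge " + dt_string}
        all_values = []
        while url is not None:
            r = requests.get(url, params=params, headers=headers)
            data = r.json()
            all_values.extend(data["value"])      # accumulate every page
            url = data.get("@odata.nextLink")     # missing/None once the last page is reached
            params = None                         # the next link already carries the query string
        with open("sample.json", "w") as outfile:
            json.dump(all_values, outfile)        # one file with all pages appended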
What is the best way to get this to read each next page of results? Currently the data is pulling, but only page 1:
import requests
import json

page = 1
url = "https://api-prod.grip.events/1/container/4368/search?search=&sort=name&order=asc&type_id=4907,4906,5265,4964,4904,1026,4908&page=%d"
headers = {
    'x-authorization': 'a422cc2a-31fb-4b4e-a1bd-a34b561adc6c'
}
with open("list.txt", "w") as f:
    for page in range(1, 1000):
        response = requests.get(url % page, headers=headers).json()
        contacts = response["data"]
        for contact in contacts:
            target = "%s\t%s\t%s\t%s" % (contact["company_name"], contact["job_title"], contact["name"], contact["job_industry"])
            f.write(target + "\n")
            print(target)
I found that your site's encoding is UTF-8, so maybe try this:
import requests
import json

page = 1
url = "https://api-prod.grip.events/1/container/4368/search?search=&sort=name&order=asc&type_id=4907,4906,5265,4964,4904,1026,4908&page=%d"
headers = {
    'x-authorization': 'a422cc2a-31fb-4b4e-a1bd-a34b561adc6c'
}
with open("list.txt", "w", encoding='utf-8') as f:  # added encoding='utf-8'
    for page in range(1, 1000):
        response = requests.get(url % page, headers=headers).json()
        contacts = response["data"]
        for contact in contacts:
            target = "%s\t%s\t%s\t%s" % (contact["company_name"], contact["job_title"], contact["name"], contact["job_industry"])
            f.write(target + "\n")
            print(target)
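If the loop should also stop cleanly instead of requesting up to 999 pages, one option is to break as soon as a page comes back empty. This is a sketch that assumes the API returns an empty "data" list past the last page:

    with open("list.txt", "w", encoding='utf-8') as f:
        page = 1
        while True:
            response = requests.get(url % page, headers=headers).json()
            contacts = response.get("data", [])
            if not contacts:  # no more results: stop paging
                break
            for contact in contacts:
                target = "%s\t%s\t%s\t%s" % (contact["company_name"], contact["job_title"], contact["name"], contact["job_industry"])
                f.write(target + "\n")
            page += 1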
I want to get all the values for the key 'id' in data (without all the other extra information, simply just all of the id values, e.g. 60220611, 76744679).
import requests
import json

proxies = {"http": "http://176.9.75.42:3128",
           "https": "http://88.198.50.103:8080"}  # second key changed from a duplicate "http"

user_id = 4913866

def jprint(obj):
    text = json.dumps(obj, sort_keys=True, indent=4)
    print(text)

user_id = 367
URL = f"https://badges.roblox.com/v1/users/{user_id}/badges"
data = []
payload = {"limit": 100, "sortOrder": "Asc"}
resp = requests.get(URL, params=payload)
blob = resp.json()
data.extend(blob["data"])
cursor = blob["nextPageCursor"]
while cursor is not None:
    payload.update({"cursor": cursor})
    resp = requests.get(URL, params=payload, proxies=proxies)
    blob = resp.json()
    data.extend(blob["data"])
    cursor = blob["nextPageCursor"]
    data.append([cursor])
print(data)
import requests
import json

proxies = {"http": "http://176.9.75.42:3128",
           "https": "http://88.198.50.103:8080"}  # second key changed from a duplicate "http"

user_id = 4913866

def jprint(obj):
    text = json.dumps(obj, sort_keys=True, indent=4)
    print(text)

user_id = 367
URL = f"https://badges.roblox.com/v1/users/{user_id}/badges"
data = []
payload = {"limit": 5, "sortOrder": "Asc"}
resp = requests.get(URL, params=payload)
blob = resp.json()
data.extend(blob["data"])
cursor = blob["nextPageCursor"]
while cursor is not None:
    payload.update({"cursor": cursor})
    resp = requests.get(URL, params=payload, proxies=proxies)
    blob = resp.json()
    data.extend(blob["data"])
    cursor = blob["nextPageCursor"]
    # the original data.append([cursor]) is dropped here: appending cursor
    # lists into data would make the id extraction below raise a TypeError
print(data)

# iterate over the data list and get each 'id' key
id_list = [each['id'] for each in data]
print(id_list)
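If data might still contain stray non-dict entries (for example, the leftover cursor lists from the original loop), a defensive variant of the extraction skips them:

    id_list = [each['id'] for each in data if isinstance(each, dict)]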
I'm collecting tweets from Twitter's API. My code returns a string, which I have transformed into a dictionary. I am looking to create a CSV where I store this data in columns. I have attached an image of what my CSV currently looks like.
current CSV image: (screenshot not shown)
What do you suggest for creating something like the following?
desired outcome: (screenshot not shown)
with open('dict.csv', 'w') as csv_file:
    writer = csv.writer(csv_file)
    for key, value in y.items():
        writer.writerow([key, value])

#with open('my_file.csv', 'w') as f:
#    [f.write('{0},{1}\n'.format(key, value)) for key, value in y.items()]
Full code:
import requests
import os
import json
import pandas as pd
import csv
import sys
import time

bearer_token = "insert here"
search_url = "https://api.twitter.com/2/tweets/search/all"
query_params = {'query': '(Johnson&Johnson) -is:retweet -is:verified -baby -lotion -shampoo',
                'tweet.fields': 'text,created_at',  # merged the duplicate 'tweet.fields' keys
                'start_time': '2021-01-20T00:00:01.000Z',
                'end_time': '2021-02-17T23:30:00.000Z'}
#query_params={'query':'(vaccine OR vaccinated) -is:retweet -is:verified -RT -baby -lotion -shampoo&start_time=2021-01-20T00:00:01.000Z&end_time=2021-02-20T23:30:00.000Z&max_results=10&tweet.fields=author_id,conversation_id,created_at,geo,id,lang,source,text&expansions=author_id&place.fields=full_name&user.fields=created_at,description,entities,id,location,name,url,username'}

def create_headers(bearer_token):
    headers = {"Authorization": "Bearer {}".format(bearer_token)}
    return headers

def connect_to_endpoint(url, headers, params):
    response = requests.request("GET", search_url, headers=headers, params=params)
    print('first:', response.status_code)
    if response.status_code != 200:
        raise Exception(response.status_code, response.text)
    return response.json()

def main():
    headers = create_headers(bearer_token)
    json_response = connect_to_endpoint(search_url, headers, query_params)
    x = json.dumps(json_response, sort_keys=True)
    y = json.loads(x)

if __name__ == "__main__":
    main()
Try using csv.DictWriter:
import csv

# csv_file: output path; csv_columns: list of field names; dict_data: list of dicts
with open(csv_file, 'w') as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
    writer.writeheader()
    for data in dict_data:
        writer.writerow(data)
For more info, refer to this link: How to save a Python Dictionary to a CSV File?
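Applied to the Twitter response above, a minimal sketch might flatten json_response['data'] into rows. write_tweets_csv is a hypothetical helper, and the field names ('id', 'created_at', 'text') are assumptions about what the query actually returns:

    import csv

    def write_tweets_csv(json_response, path='tweets.csv'):
        # assumed tweet fields; adjust to match the tweet.fields you requested
        csv_columns = ['id', 'created_at', 'text']
        tweets = json_response.get('data', [])
        with open(path, 'w', newline='', encoding='utf-8') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=csv_columns, extrasaction='ignore')
            writer.writeheader()
            for tweet in tweets:
                writer.writerow(tweet)  # one tweet per row, one column per field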
I have a dataframe as below:
type      url
Cov       link1.ndjson
Cov       link2.ndjson
EOB       link1.ndjson
Patient   link1.ndjson
There are N rows with links for three types of files. As a newbie in Python, I currently use each file's type and its link one by one to download the files.
I can download the files manually using this code:
import requests
import sys

url = 'https://fakesite.com/4472/link1.ndjson'  # the link is from the first row of the above dataframe
headers = {
    'Authorization': 'Bearer %s' % access_token,
    'Accept-Encoding': 'gzip',
}
response = requests.get(url, headers=headers)
print(response.status_code)

original_stdout = sys.stdout  # save a reference to the original standard output
with open('Cov_link1.ndjson', 'w') as f:
    # the file name is the concatenation of the first row of the dataframe: type + '_' + url
    sys.stdout = f  # change the standard output to the file we created
    print(response.text)
sys.stdout = original_stdout  # reset the standard output to its original value
The request is to download the files for all N rows in the dataframe. Can someone please help?
Kindly check if this serves your purpose:
import pandas as pd
import requests
import sys

df = pd.DataFrame({'type': ['Cov', 'Cov', 'EOB', 'Patient'],
                   'url': ['link1.ndjson', 'link2.ndjson', 'link1.ndjson', 'link1.ndjson']})
df['name'] = df['type'] + '_' + df['url']
url_list = list(df['url'])
name_list = list(df['name'])
headers = {
    'Authorization': 'Bearer %s' % access_token,
    'Accept-Encoding': 'gzip',
}
result = list(zip(url_list, name_list))
for i, j in result:
    url = 'https://fakesite.com/4472/' + i  # base URL taken from the question's example
    response = requests.get(url, headers=headers)
    print(response.status_code)
    original_stdout = sys.stdout
    with open(j, 'w') as f:
        sys.stdout = f
        print(response.text)
    sys.stdout = original_stdout
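A slightly simpler variant of the same loop, again a sketch assuming the base URL from the question, writes the response directly instead of redirecting stdout:

    for i, j in zip(df['url'], df['name']):
        url = 'https://fakesite.com/4472/' + i  # assumed base URL, as in the question
        response = requests.get(url, headers=headers)
        print(response.status_code)
        with open(j, 'w') as f:
            f.write(response.text)  # no sys.stdout juggling needed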