In the Python code for requesting data from Google Analytics via the API ( https://developers.google.com/analytics/devguides/reporting/core/v4/quickstart/service-py ), oauth2client is being used. The code was last updated in July 2018, and by now oauth2client is deprecated. My question is: can I get the same code where google-auth or oauthlib is used instead of oauth2client?
I have been googling for a way to replace the parts of the code that use oauth2client, but since I am not a developer I didn't succeed. Below is how I tried to adapt the code from that link ( https://developers.google.com/analytics/devguides/reporting/core/v4/quickstart/service-py ) to google-auth. Any idea how to fix this?
import argparse
from apiclient.discovery import build
from google.oauth2 import service_account
from google.auth.transport.urllib3 import AuthorizedHttp
SCOPES = ['...']
DISCOVERY_URI = ('...')
CLIENT_SECRETS_PATH = 'client_secrets.json' # Path to client_secrets.json file.
VIEW_ID = '...'
def initialize_analyticsreporting():
"""Initializes the analyticsreporting service object.
Returns:
analytics an authorized analyticsreporting service object.
"""
# Parse command-line arguments.
credentials = service_account.Credentials.from_service_account_file(CLIENT_SECRETS_PATH)
# Prepare credentials, and authorize HTTP object with them.
# If the credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# credentials will get written back to a file.
authed_http = AuthorizedHttp(credentials)
response = authed_http.request(
'GET', SCOPES)
# Build the service object.
analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)
return analytics
def get_report(analytics):
# Use the Analytics Service Object to query the Analytics Reporting API V4.
return analytics.reports().batchGet(
body=
{
"reportRequests":[
{
"viewId":VIEW_ID,
"dateRanges":[
{
"startDate":"2019-01-01",
"endDate":"yesterday"
}],
"dimensions":[
{
"name":"ga:transactionId"
},
{
"name":"ga:sourceMedium"
},
{
"name":"ga:date"
}],
"metrics":[
{
"expression":"ga:transactionRevenue"
}]
}]
}
).execute()
def printResults(response):
for report in response.get("reports", []):
columnHeader = report.get("columnHeader", {})
dimensionHeaders = columnHeader.get("dimensions", [])
metricHeaders = columnHeader.get("metricHeader", {}).get("metricHeaderEntries", [])
rows = report.get("data", {}).get("rows", [])
for row in rows:
dimensions = row.get("dimensions", [])
dateRangeValues = row.get("metrics", [])
for header, dimension in zip(dimensionHeaders, dimensions):
print (header + ": " + dimension)
for i, values in enumerate(dateRangeValues):
for metric, value in zip(metricHeaders, values.get("values")):
print (metric.get("name") + ": " + value)
def main():
analytics = initialize_analyticsreporting()
response = get_report(analytics)
printResults(response)
if __name__ == '__main__':
main()
I need to obtain response in form of a json with given dimensions and metrics from Google Analytics.
For those running into this problem who wish to port to the newer auth libraries, do a diff between the two versions of the short/simple Google Drive API sample in the code repo for the G Suite APIs intro codelab to see what needs to be updated (and what can stay as-is). The bottom line is that the API client library code can remain the same while all you do is swap out the auth libraries underneath.
Note that sample is only for user account auth... for service account auth the update is similar, but I don't have an example of that yet (working on one though... will update this once it's published).
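Until that sample is published, here is a minimal sketch of what the swap might look like for the Analytics Reporting snippet above, assuming the service-account key file named in the question and the usual analytics.readonly scope (an illustration, not the official sample):
from googleapiclient.discovery import build
from google.oauth2 import service_account

SCOPES = ['https://www.googleapis.com/auth/analytics.readonly']
KEY_FILE = 'client_secrets.json'  # service-account key file, name taken from the question

def initialize_analyticsreporting():
    credentials = service_account.Credentials.from_service_account_file(
        KEY_FILE, scopes=SCOPES)
    # No explicit http object or DISCOVERY_URI is needed; the client library
    # fetches the v4 discovery document itself.
    return build('analyticsreporting', 'v4', credentials=credentials)
The rest of the quickstart (get_report and printResults) can stay as it is.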
Related
I've been trying to make a script that updates a YouTube video's title with its view count, like in the Tom Scott video. But I keep getting this error during authentication.
I can get the code to authenticate, but it just says I don't have enough permissions.
<HttpError 400 when requesting https://youtube.googleapis.com/youtube/v3/videos?part=%28%27snippet%2Cstatus%2Clocalizations%27%2C%29&alt=json returned "'('snippet'". Details: "[{'message': "'('snippet'", 'domain': 'youtube.part', 'reason': 'unknownPart', 'location': 'part', 'locationType': 'parameter'}]">
File "C:\Users\erik\Documents\PYTHON CODE\view_changer\example.py", line 49, in main
response_update = request_update.execute()
Here's my code:
import os
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
scopes = ["https://www.googleapis.com/auth/youtube"]
api_key = "..."
video_1_part_view = 'statistics'
video_1_id_view = 'hVuoPgZJ3Yw'
video_1_part_update= "snippet,status,localizations",
video_1_part_body_update= { "id": "1y94SadSsMU",
"localizations": {"es": { "title": "no hay nada a ver aqui",
"description": "Esta descripcion es en español."
}
}
}
def main():
# Disable OAuthlib's HTTPS verification when running locally.
# *DO NOT* leave this option enabled in production.
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
api_service_name = "youtube"
api_version = "v3"
client_secrets_file = (r"C:\Users\erik\Documents\PYTHON CODE\view_changer\client_secret_379587650859-bbl63je7sh6kva19l33auonkledt09a9.apps.googleusercontent.com.json")
# Get credentials and create an API client
flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(client_secrets_file, scopes)
credentials = flow.run_console()
youtube = googleapiclient.discovery.build(api_service_name, api_version, credentials=credentials)
#check views
request_view= youtube.videos().list( part = video_1_part_view, id = video_1_id_view )
respons_view = request_view.execute()
views = respons_view['items'][0]['statistics']['viewCount']
print (views)
#update video
request_update = youtube.videos().update(part= video_1_part_update, body = video_1_part_body_update)
response_update = request_update.execute()
print(response_update)
main()
# if __name__ == "__main__":
# main()
If the problem is not in the code, I might have another idea of what it could be. When I was first setting up OAuth I didn't know I needed to pick what the user would be giving permission to (I know, rookie mistake), but even since I updated it, when I ask for permission after going to the authentication link the code gives me back, it doesn't ask permission for all the things I told it to do.
According to the official documentation, the request parameter part of the Videos.list API endpoint is defined as:
part (string)
The part parameter specifies a comma-separated list of one or more video resource properties that the API response will include.
[...]
But you're sending that request parameter as:
('snippet,status,localizations',).
That's because your variable video_1_part_update is defined with a trailing comma:
video_1_part_update = "snippet,status,localizations",
(Do notice the trailing comma.) That comma makes video_1_part_update a Python tuple, which is not what you need to send to the API endpoint.
Just remove the trailing comma and things will work OK.
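For example, defined as a plain string it would be:
video_1_part_update = "snippet,status,localizations"  # no trailing comma, so this stays a str, not a tuple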
The code I've written seems to be what I need, but it doesn't work and I get a 401 (authentication) error. I've tried everything: 1. service account permissions, 2. creating a secret ID and key (not sure how to use those to get an access token, though), 3. basically everything for the past two days.
import requests
from google.oauth2 import service_account
METADATA_URL = 'http://metadata.google.internal/computeMetadata/v1/'
METADATA_HEADERS = {'Metadata-Flavor': 'Google'}
SERVICE_ACCOUNT = [NAME-OF-SERVICE-ACCOUNT-USED-WITH-CLOUD-FUNCTION-WHICH-HAS-COMPUTE-ADMIN-PRIVILEGES]
def get_access_token():
url = '{}instance/service-accounts/{}/token'.format(
METADATA_URL, SERVICE_ACCOUNT)
# Request an access token from the metadata server.
r = requests.get(url, headers=METADATA_HEADERS)
r.raise_for_status()
# Extract the access token from the response.
access_token = r.json()['access_token']
return access_token
def start_vms(request):
request_json = request.get_json(silent=True)
request_args = request.args
if request_json and 'number_of_instances_to_create' in request_json:
number_of_instances_to_create = request_json['number_of_instances_to_create']
elif request_args and 'number_of_instances_to_create' in request_args:
number_of_instances_to_create = request_args['number_of_instances_to_create']
else:
number_of_instances_to_create = 0
access_token = get_access_token()
address = "https://www.googleapis.com/compute/v1/projects/[MY-PROJECT]/zones/europe-west2-b/instances?sourceInstanceTemplate=https://www.googleapis.com/compute/v1/projects/[MY-PROJECT]/global/instanceTemplates/[MY-INSTANCE-TEMPLATE]"
headers = {'token': '{}'.format(access_token)}
for i in range(1,number_of_instances_to_create):
data = {'name': 'my-instance-{}'.format(i)}
r = requests.post(address, data=data, headers=headers)
r.raise_for_status()
print("my-instance-{} created".format(i))
Any advice/guidance? If someone could tell me how to get an access token using a secret ID and key, that would help. Also, I'm not sure OAuth 2.0 will work, because I essentially want to turn these machines on, have them do some processing and then self-destruct, so there is no user involved to grant access. If OAuth 2.0 is the wrong way to go about it, what else can I use?
I tried using gcloud, but calling gcloud commands via subprocess isn't recommended.
I did something similar to this, though I used the Node 10 Firebase Functions runtime; it should be very similar nevertheless.
I agree that OAuth is not the correct solution since there is no user involved.
What you need to use is 'Application Default Credentials' which is based on the permissions available to your cloud functions' default service account which will be the one labelled as "App Engine default service account" here:
https://console.cloud.google.com/iam-admin/serviceaccounts?folder=&organizationId=&project=[YOUR_PROJECT_ID]
(For my project that service account already had the permissions necessary for starting and stopping GCE instances, but for other APIs I had to grant it permissions manually.)
ADC is for server-to-server API calls. To use it, I called google.auth.getClient (from the Google APIs auth library) with just the scope, i.e. "https://www.googleapis.com/auth/cloud-platform".
This API is very versatile in that it returns whatever credentials you need: when I am running on Cloud Functions it returns a 'Compute' object, and when I'm running in the emulator it gives me a 'UserRefreshClient' object.
I then include that auth object in my call to compute.instances.insert() and compute.instances.stop().
Here is the template I used for testing my code...
{
name: 'base',
description: 'Temporary instance used for testing.',
tags: { items: [ 'test' ] },
machineType: `zones/${zone}/machineTypes/n1-standard-1`,
disks: [
{
autoDelete: true, // you will want this!
boot: true,
type: 'PERSISTENT',
initializeParams: {
diskSizeGb: '10',
sourceImage: "projects/ubuntu-os-cloud/global/images/ubuntu-minimal-1804-bionic-v20190628",
}
}
],
networkInterfaces: [
{
network: `https://www.googleapis.com/compute/v1/projects/${projectId}/global/networks/default`,
accessConfigs: [
{
name: 'External NAT',
type: 'ONE_TO_ONE_NAT'
}
]
}
],
}
Hope that helps.
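If you would rather stay in Python (like the code in the question), a minimal sketch of the same idea, using Application Default Credentials picked up automatically inside the Cloud Function, might look like this; the zone and instance-template name are placeholders:
import google.auth
from google.auth.transport.requests import AuthorizedSession

def start_vms(request):
    # Inside a Cloud Function, Application Default Credentials resolve to the
    # function's runtime service account, so no key file or user flow is needed.
    credentials, project = google.auth.default(
        scopes=['https://www.googleapis.com/auth/cloud-platform'])
    authed_session = AuthorizedSession(credentials)  # adds the Bearer token header for us

    # Placeholders: adjust the zone and instance template to your setup.
    address = ('https://www.googleapis.com/compute/v1/projects/{p}/zones/europe-west2-b/'
               'instances?sourceInstanceTemplate=https://www.googleapis.com/compute/v1/'
               'projects/{p}/global/instanceTemplates/my-template'.format(p=project))

    r = authed_session.post(address, json={'name': 'my-instance-1'})
    r.raise_for_status()
    return 'created'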
If you're getting a 401 error, it means the access token you're using is either expired or invalid.
This guide will be able to show you how to request OAuth 2.0 access tokens and make API calls using a Service Account: https://developers.google.com/identity/protocols/OAuth2ServiceAccount
The .json file mentioned is the private key you create in IAM & Admin under your service account.
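With the google-auth library, that flow boils down to something like this sketch (the key-file path is a placeholder for the .json key you downloaded):
from google.oauth2 import service_account
from google.auth.transport.requests import Request

SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
credentials = service_account.Credentials.from_service_account_file(
    'service-account-key.json', scopes=SCOPES)  # the .json key from IAM & Admin
credentials.refresh(Request())  # exchanges a signed JWT for an access token
print(credentials.token)        # send this as "Authorization: Bearer <token>"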
Unable to get all results of mediaItems.search:
photos = google.get_service(credentials, 'photoslibrary', 'v1')
request = photos.albums().list(pageSize=50)
while request is not None:
result = request.execute()
for album in result['albums']:
request2 = photos.mediaItems().search(body={'albumId': album['id']})
while request2 is not None:
result2 = request2.execute()
request2 = photos.mediaItems().search_next(request2, result2)
print('nextpageToken' in result2, request2)
request = photos.albums().list_next(request, result)
Running this fails on the first search_next() call with
[...]
File "/usr/local/lib/python3.7/dist-packages/googleapiclient/_helpers.py", line 130, in positional_wrapper
return wrapped(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/googleapiclient/http.py", line 851, in execute
raise HttpError(resp, content, uri=self.uri)
googleapiclient.errors.HttpError: <HttpError 400 when requesting https://photoslibrary.googleapis.com/v1/mediaItems:search?alt=json returned "Invalid JSON payload received. Unexpected end of string. Expected an object key or }.
That library isn't really supported it seems, so that might be the problem, or am I missing something here?
The google-api-python-client is a generic client for all of Google's discovery-based APIs, and as such it supports every API based on this protocol, including the Photos one.
The correct way to use the services is to invoke the build method and, after that, use the available methods of the service.
Besides this, you always want to use list_next, as search_next does not exist.
Here is an example of the Photos API working on my laptop (Python 3.6):
import os
import pickle
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
SCOPES = ['https://www.googleapis.com/auth/photoslibrary.readonly', ]
# we check if we save the credentials in the past and we reuse them
if not os.path.exists('credentials.dat'):
# no credentials found, we run the standard auth flow
flow = InstalledAppFlow.from_client_secrets_file('client_id.json', SCOPES)
credentials = flow.run_local_server()
with open('credentials.dat', 'wb') as credentials_dat:
pickle.dump(credentials, credentials_dat)
else:
with open('credentials.dat', 'rb') as credentials_dat:
credentials = pickle.load(credentials_dat)
if credentials.expired:
credentials.refresh(Request())
photos_sdk = build('photoslibrary', 'v1', credentials=credentials)
# photos API
photos_albums_api = photos_sdk.albums()
photos_mediaitems_api = photos_sdk.mediaItems()
albums_list_params = {
'pageSize': 50,
}
# first request
albums_list_req = photos_albums_api.list(**albums_list_params)
while albums_list_req is not None:
photos_albums_list = albums_list_req.execute()
# print(photos_albums_list)
for album in photos_albums_list['albums']:
print(album['title'])
mediaitems_search_req = photos_mediaitems_api.search(body={'albumId': album['id']})
while mediaitems_search_req is not None:
mediaitems_search = mediaitems_search_req.execute()
print(mediaitems_search)
# mediaItems pagination management
mediaitems_search_req = photos_mediaitems_api.list_next(mediaitems_search_req, mediaitems_search)
# albums pagination handling
albums_list_req = photos_albums_api.list_next(albums_list_req, photos_albums_list)
If there are more results than the specified pageSize, the API returns a nextPageToken that you should use to request the next page. See the example here: Access Google Photo API with Python using google-api-python-client
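A minimal sketch of handling that token by hand, assuming a photos service object and an album_id as in the question, could look like this:
media_items = []
page_token = None
while True:
    body = {'albumId': album_id, 'pageSize': 100}
    if page_token:
        body['pageToken'] = page_token  # ask for the next page of results
    result = photos.mediaItems().search(body=body).execute()
    media_items.extend(result.get('mediaItems', []))
    page_token = result.get('nextPageToken')
    if not page_token:  # no token means we reached the last page
        break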
I need to get the list of all files and folders in Google Drive owned by a user. For some reason the files.list method doesn't return nextPageToken in the response and I see only a few results, since I can't go to the next page.
I have tried the API Client Library for Python and the API Explorer, but I receive only a few records per user.
My code
users = ['user1#domain.com', 'user2#domain.com']
drive_array = []
if users:
for item in users:
page_token_drive = None
query = "'%s' in owners" % (item)
while True:
drive_result = service_drive.files().list(q=query, corpora='domain', includeTeamDriveItems=False,
supportsTeamDrives=False, fields='nextPageToken, files(id,owners)',
pageToken=page_token_drive).execute()
drive_array.extend(drive_result.get('files', []))
page_token_drive = drive_result.get('nextPageToken', None)
if not page_token_drive:
break
I expect to get the file id and the owners array for every file owned by the user, for example:
[
{
"id": "12344",
"owners": [
{
"kind": "drive#user",
"displayName": "User1 User1",
"photoLink": "https://lg",
"me": false,
"permissionId": "1234556",
"emailAddress": "user1#domain.com"
}
]
},
{
"id": "09875",
"owners": [
{
"kind": "drive#user",
"displayName": "User1 User1",
"photoLink": "https://lh5",
"me": false,
"permissionId": "56565665655656566",
"emailAddress": "user1#domain.com"
}
]
}
]
According to the documentation, to authorize requests to G Suite APIs, you need to use OAuth 2.0 and you basically have two options (or flows if you want to stick with the official terminology):
User authorization with a consent screen (e.g OAuth 2.0 for installed applications)
Domain-wide delegation for server to server applications (e.g. Using OAuth 2.0 for Server to Server Applications)
With the first option, once the user completes the flow, you can only access that user's resources. So if you want to list all the contents of Drive for different users in the G Suite domain, you need to use the second option.
I also recommend using the Python client's pagination feature to manage the listing of files.
Here is a working example of option 1 (Python 3.6)
import os
import pickle
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
SCOPES = ['https://www.googleapis.com/auth/drive', ]
users = ['user1#domain.eu', 'user2#domain.eu']
# we check if we save the credentials in the past and we reuse them
if not os.path.exists('credentials.dat'):
# no credentials found, we run the standard auth flow
flow = InstalledAppFlow.from_client_secrets_file('client_id.json', SCOPES)
credentials = flow.run_local_server()
with open('credentials.dat', 'wb') as credentials_dat:
pickle.dump(credentials, credentials_dat)
else:
with open('credentials.dat', 'rb') as credentials_dat:
credentials = pickle.load(credentials_dat)
if credentials.expired:
credentials.refresh(Request())
drive_sdk = build('drive', 'v3', credentials=credentials)
# drive files API
drive_files_api = drive_sdk.files()
for item in users:
query = "'{}' in owners".format(item)
drive_list_params = {
'q': query,
'corpora': 'domain',
'includeTeamDriveItems': False,
'supportsTeamDrives': False,
'fields': 'files(id,owners),nextPageToken',
}
# first request
files_list_req = drive_files_api.list(**drive_list_params)
while files_list_req is not None:
drive_file_list = files_list_req.execute()
print(drive_file_list.get('files', []))
# pagination handling
files_list_req = drive_files_api.list_next(files_list_req, drive_file_list)
If you run this, you will be prompted for authorization and the script will run on your drive listing files owned by other users and shared with you.
If you want to use the server-to-server flow with domain-wide delegation to list all the files (not just the ones shared with you), here is another working sample.
from googleapiclient.discovery import build
from google.oauth2 import service_account
SCOPES = ['https://www.googleapis.com/auth/drive', ]
users = ['user1#domain.eu', 'user2#domain.eu']
credentials = service_account.Credentials.from_service_account_file('client_secret.json', scopes=SCOPES)
for item in users:
delegated_credentials = credentials.with_subject(item)
drive_sdk = build('drive', 'v3', credentials=delegated_credentials)
# drive files API
drive_files_api = drive_sdk.files()
drive_list_params = {
'corpora': 'domain',
'includeTeamDriveItems': False,
'supportsTeamDrives': False,
'fields': 'files(id,owners),nextPageToken',
}
# first request
files_list_req = drive_files_api.list(**drive_list_params)
while files_list_req is not None:
drive_file_list = files_list_req.execute()
print(drive_file_list.get('files', []))
# pagination handling
files_list_req = drive_files_api.list_next(files_list_req, drive_file_list)
I'm trying to get a very simple Python script to talk to Freebase.
All the examples I've found use the simple / api key authorization model. So I made a Google Developer account, made a project, and tried to get a key as Google says to. It demands I provide a list of numeric IP addresses that I'll call from. Not feasible, since I don't have a fixed IP (I do have dyndns set up, but that doesn't help since Google won't take a domain name, only numerics).
So I tried OAuth2, which is overkill for what I need (I'm not accessing any non-public user data). But I couldn't find even one online example of using OAuth2 for Freebase. I tried adjusting other examples, but after bouncing around between appengine, Decorator, several obsolete Python libraries, and several other approaches, I got nowhere.
Can anyone either explain or point to a good example of how to do this (without spending 10x more time on authorization, than on the app I'm trying to authorize)? A working example with OAuth2, preferably without many layers of "simplifying" APIs; or a tip on how to get around the fixed-IP requirement for API key authorization, would be fantastic. Thanks!
Steve
I had to do this for Google Drive, but as far as I know this should work for any Google API.
When you create a new Client ID in the developer console, you should have the option to create a Service Account. This will create a public/private key pair, and you can use that to authenticate without any OAuth nonsense.
I stole this code out of our GDrive library, so it may be broken, and it is GDrive-specific, so you will need to replace anything that says "drive" with whatever Freebase wants.
But I hope it's enough to get you started.
# Sample code that connects to Google Drive
from apiclient.discovery import build
import httplib2
from oauth2client.client import SignedJwtAssertionCredentials, VerifyJwtTokenError
SERVICE_EMAIL = "you#gmail.com"
PRIVATE_KEY_PATH ="./private_key.p12"
# Load private key
key = open(PRIVATE_KEY_PATH, 'rb').read()
# Build the credentials object
credentials = SignedJwtAssertionCredentials(SERVICE_EMAIL, key, scope='https://www.googleapis.com/auth/drive')
try:
http = httplib2.Http()
http = credentials.authorize(http)
except VerifyJwtTokenError as e:
print(u"Unable to authorize using our private key: VerifyJwtTokenError, {0}".format(e))
raise
connection = build('drive', 'v2', http=http)
# You can now use connection to call anything you need for freebase - see their API docs for more info.
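SignedJwtAssertionCredentials comes from the old oauth2client; if you are on the newer google-auth stack instead, the equivalent setup is roughly this sketch (using a JSON key instead of the .p12 file; the file name is a placeholder):
from googleapiclient.discovery import build
from google.oauth2 import service_account

SCOPES = ['https://www.googleapis.com/auth/drive']
credentials = service_account.Credentials.from_service_account_file(
    'service_account.json', scopes=SCOPES)  # JSON key downloaded from the console
connection = build('drive', 'v2', credentials=credentials)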
Working from #Rachel's sample code, with a bit of fiddling I got to this, which works, and illustrates the topic, search, and query features.
Requires the urllib and json libraries (both in the standard library), plus the client library code from https://code.google.com/p/google-api-python-client/downloads/list
Must enable billing from 'settings' for the specific project
The mglread() interface for Python is broken as of April 2014.
The documented 'freebase.readonly' scope doesn't work.
from apiclient.discovery import build
import httplib2
import urllib
import json
import sys
from oauth2client.client import SignedJwtAssertionCredentials, VerifyJwtTokenError
# Set up needed constants
#
SERVICE_EMAIL = args.serviceEmail
PRIVATE_KEY_PATH = args.privateKeyFile
topicID = args.topicID
query = args.query
search_url = 'https://www.googleapis.com/freebase/v1/search'
topic_url = 'https://www.googleapis.com/freebase/v1/topic'
mql_url = "https://www.googleapis.com/freebase/v1/mqlread"
key = open(PRIVATE_KEY_PATH, 'rb').read()
credentials = SignedJwtAssertionCredentials(SERVICE_EMAIL, key,
scope='https://www.googleapis.com/auth/freebase')
try:
http = httplib2.Http()
http = credentials.authorize(http)
except VerifyJwtTokenError as e:
print(u"Unable to authorize via private key: VerifyJwtTokenError, {0}".format(e))
raise
connection = build('freebase', 'v1', http=http)
# Search for a topic by Freebase topic ID
# https://developers.google.com/freebase/v1/topic-overview
#
params = { 'filter': 'suggest' }
url = topic_url + topicID + '?' + urllib.urlencode(params)
if (args.verbose): print("URL: " + url)
resp = urllib.urlopen(url).read()
if (args.verbose): print("Response: " + resp)
respJ = json.loads(resp)
print("Topic property(s) for '%s': " % topicID)
for property in respJ['property']:
print(' ' + property + ':')
for value in respJ['property'][property]['values']:
print(' - ' + value['text'])
print("\n")
# Do a regular search
# https://developers.google.com/freebase/v1/search-overview
#
params = { 'query': query }
url = search_url + '?' + urllib.urlencode(params)
if (args.verbose): print("URL: " + url)
resp = urllib.urlopen(url).read()
if (args.verbose): print("Response: " + resp)
respJ = json.loads(resp)
print("Search result for '%s': " % query)
theKeys = {}
for res in respJ['result']:
print ("%-40s %-15s %10.5f" %
(res['name'], res['mid'], res['score']))
params = '{ "id": "%s", "type": []}' % (res['mid'])
# Run a query on the retrieved ID, to get its types:
url = mql_url + '?query=' + params
resp = urllib.urlopen(url).read()
respJ = json.loads(resp)
print(" Type(s): " + `respJ['result']['type']`)
otherKeys = []
for k in res:
if (k not in ['name', 'mid', 'score']): otherKeys.append(k)
if (len(otherKeys)): print(" Other keys: " + ", ".join(otherKeys))
sys.exit(0)