Can you produce a Python example of how to download a Google Sheets spreadsheet given its key and worksheet ID (gid)? I can't.
I've scoured versions 1, 2 and 3 of the API. I'm having no luck. I can't figure out their complicated Atom-like feeds API, the gdata.docs.service.DocsService._DownloadFile private method says that I'm unauthorized, and I don't want to write an entire Google login authentication system myself. I'm about to stab myself in the face due to frustration.
I have a few spreadsheets and I want to access them like so:
username = 'mygooglelogin@gmail.com'
password = getpass.getpass()

def get_spreadsheet(key, gid=0):
    ... (help!) ...

for row in get_spreadsheet('5a3c7f7dcee4b4f'):
    cell1, cell2, cell3 = row
    ...
Please save my face.
Update 1: I've tried the following, but no combination of Download() or Export() seems to work. (Docs for DocsService here)
import gdata.docs.service
import getpass
import os
import tempfile
import csv

def get_csv(file_path):
    return csv.reader(file(file_path).readlines())

def get_spreadsheet(key, gid=0):
    gd_client = gdata.docs.service.DocsService()
    gd_client.email = 'xxxxxxxxx@gmail.com'
    gd_client.password = getpass.getpass()
    gd_client.ssl = False
    gd_client.source = "My Fancy Spreadsheet Downloader"
    gd_client.ProgrammaticLogin()

    file_path = tempfile.mktemp(suffix='.csv')
    uri = 'http://docs.google.com/feeds/documents/private/full/%s' % key
    try:
        entry = gd_client.GetDocumentListEntry(uri)
        # XXXX - The following dies with RequestError "Unauthorized"
        gd_client.Download(entry, file_path)
        return get_csv(file_path)
    finally:
        try:
            os.remove(file_path)
        except OSError:
            pass
The https://github.com/burnash/gspread library is a newer, simpler way to interact with Google Spreadsheets than the gdata library suggested in the older answers, which is not only too low-level but also overly complicated.
You will also need to create and download (in JSON format) a Service Account key: https://console.developers.google.com/apis/credentials/serviceaccountkey
Here's an example of how to use it:
import csv
import gspread
from oauth2client.service_account import ServiceAccountCredentials

scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('credentials.json', scope)
docid = "0zjVQXjJixf-SdGpLKnJtcmQhNjVUTk1hNTRpc0x5b9c"

client = gspread.authorize(credentials)
spreadsheet = client.open_by_key(docid)
for i, worksheet in enumerate(spreadsheet.worksheets()):
    filename = docid + '-worksheet' + str(i) + '.csv'
    with open(filename, 'wb') as f:
        writer = csv.writer(f)
        writer.writerows(worksheet.get_all_values())
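Note that open(filename, 'wb') is Python 2 style; on Python 3 the csv module expects a text-mode file. A minimal adjustment, assuming Python 3:

with open(filename, 'w', newline='') as f:  # text mode, no newline translation
    writer = csv.writer(f)
    writer.writerows(worksheet.get_all_values())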
In case anyone comes across this looking for a quick fix, here's another (currently) working solution that doesn't rely on the gdata client library:
#!/usr/bin/python

import re, urllib, urllib2

class Spreadsheet(object):
    def __init__(self, key):
        super(Spreadsheet, self).__init__()
        self.key = key

class Client(object):
    def __init__(self, email, password):
        super(Client, self).__init__()
        self.email = email
        self.password = password

    def _get_auth_token(self, email, password, source, service):
        url = "https://www.google.com/accounts/ClientLogin"
        params = {
            "Email": email, "Passwd": password,
            "service": service,
            "accountType": "HOSTED_OR_GOOGLE",
            "source": source
        }
        req = urllib2.Request(url, urllib.urlencode(params))
        return re.findall(r"Auth=(.*)", urllib2.urlopen(req).read())[0]

    def get_auth_token(self):
        source = type(self).__name__
        return self._get_auth_token(self.email, self.password, source, service="wise")

    def download(self, spreadsheet, gid=0, format="csv"):
        url_format = "https://spreadsheets.google.com/feeds/download/spreadsheets/Export?key=%s&exportFormat=%s&gid=%i"
        headers = {
            "Authorization": "GoogleLogin auth=" + self.get_auth_token(),
            "GData-Version": "3.0"
        }
        req = urllib2.Request(url_format % (spreadsheet.key, format, gid), headers=headers)
        return urllib2.urlopen(req)

if __name__ == "__main__":
    import getpass
    import csv

    email = ""  # (your email here)
    password = getpass.getpass()
    spreadsheet_id = ""  # (spreadsheet id here)

    # Create client and spreadsheet objects
    gs = Client(email, password)
    ss = Spreadsheet(spreadsheet_id)

    # Request a file-like object containing the spreadsheet's contents
    csv_file = gs.download(ss)

    # Parse as CSV and print the rows
    for row in csv.reader(csv_file):
        print ", ".join(row)
You might try using the AuthSub method described in the Exporting Spreadsheets section of the documentation.
Get a separate login token for the spreadsheets service and substitute that for the export. Adding this to the get_spreadsheet code worked for me:
import gdata.spreadsheet.service

def get_spreadsheet(key, gid=0):
    # ...
    spreadsheets_client = gdata.spreadsheet.service.SpreadsheetsService()
    spreadsheets_client.email = gd_client.email
    spreadsheets_client.password = gd_client.password
    spreadsheets_client.source = "My Fancy Spreadsheet Downloader"
    spreadsheets_client.ProgrammaticLogin()

    # ...
    entry = gd_client.GetDocumentListEntry(uri)
    docs_auth_token = gd_client.GetClientLoginToken()
    gd_client.SetClientLoginToken(spreadsheets_client.GetClientLoginToken())
    gd_client.Export(entry, file_path)
    gd_client.SetClientLoginToken(docs_auth_token)  # reset the DocList auth token
Notice I also used Export, as Download seems to give only PDF files.
(Jul 2016) All other answers are pretty much outdated or will be, either because they use GData ("Google Data") Protocol, ClientLogin, or AuthSub, all of which have been deprecated. The same is true for all code or libraries that use the Google Sheets API v3 or older.
Modern Google API access occurs using API keys (for accessing public data), OAuth2 client IDs (for accessing data owned by users), or service accounts (for accessing data owned by applications/in the cloud) primarily with the Google Cloud client libraries for GCP APIs and Google APIs Client Libraries for non-GCP APIs. For this task, it would be the latter for Python.
To make it happen your code needs authorized access to the Google Drive API, perhaps to query for specific Sheets to download, and then to perform the actual export(s). Since this is likely a common operation, I wrote a blogpost sharing a code snippet that does this for you. If you wish to pursue this even more, I've got another pair of posts along with a video that outlines how to upload files to and download files from Google Drive.
Note that there is also a Google Sheets API v4, but it's primarily for spreadsheet-oriented operations, i.e., inserting data, reading spreadsheet rows, cell formatting, creating charts, adding pivot tables, etc., not file-based requests like exporting, for which the Drive API is the correct one to use.
I wrote a blog post that demos exporting a Google Sheet as CSV from Drive. The core part of the script:
# setup
FILENAME = 'inventory'
SRC_MIMETYPE = 'application/vnd.google-apps.spreadsheet'
DST_MIMETYPE = 'text/csv'
DRIVE = discovery.build('drive', 'v3', http=creds.authorize(Http()))

# query for file to export
files = DRIVE.files().list(
    q='name="%s" and mimeType="%s"' % (FILENAME, SRC_MIMETYPE),
    orderBy='modifiedTime desc,name').execute().get('files', [])

# export 1st match (if found)
if files:
    fn = '%s.csv' % os.path.splitext(files[0]['name'].replace(' ', '_'))[0]
    print('Exporting "%s" as "%s"... ' % (files[0]['name'], fn), end='')
    data = DRIVE.files().export(fileId=files[0]['id'], mimeType=DST_MIMETYPE).execute()
    if data:
        with open(fn, 'wb') as f:
            f.write(data)
        print('DONE')
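The snippet assumes creds already holds authorized OAuth2 credentials. A minimal sketch of that boilerplate, assuming the oauth2client and google-api-python-client libraries this 2016-era answer implies (client_secret.json and storage.json are placeholder filenames):

import os
from httplib2 import Http
from apiclient import discovery
from oauth2client import file, client, tools

SCOPES = 'https://www.googleapis.com/auth/drive.readonly'
# Reuse saved user credentials if present, otherwise run the OAuth2 flow
store = file.Storage('storage.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
    creds = tools.run_flow(flow, store)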
To learn more about using Google Sheets with Python, see my answer for a similar question. You can also download a Sheet in XLSX and other formats supported by Drive.
If you're completely new to Google APIs, then you need to take a further step back and review these videos first:
How to use Google APIs & create API projects -- the UI has changed but the concepts are still the same
Walkthrough of authorization boilerplate code (Python) -- you can use any supported language to access Google APIs; if you don't do Python, use it as pseudocode to help get you started
Listing your files in Google Drive and code deep dive post
If you already have experience with Google Workspace (formerly G Suite, Google Apps, Google "Docs") APIs and want to see more videos on using both APIs:
Sheets API video library
Drive API video library
Google Workspace (G Suite) Dev Show video series I produced
This no longer works as of gdata 2.0.1.4:
gd_client.SetClientLoginToken(spreadsheets_client.GetClientLoginToken())
Instead, you have to do:
gd_client.SetClientLoginToken(gdata.gauth.ClientLoginToken(spreadsheets_client.GetClientLoginToken()))
I wrote pygsheets as an alternative to gspread, but using the Google Sheets API v4. It has an export method to export a spreadsheet.
import pygsheets

gc = pygsheets.authorize()

# Open the spreadsheet, then the worksheet
sh = gc.open('my new ssheet')
wks = sh.sheet1

# Export as CSV
wks.export(pygsheets.ExportType.CSV)
The following code works in my case (Ubuntu 10.04, Python 2.6.5, gdata 2.0.14):
import gdata.docs.service
import gdata.spreadsheet.service

gd_client = gdata.docs.service.DocsService()
gd_client.ClientLogin(email, password)

spreadsheets_client = gdata.spreadsheet.service.SpreadsheetsService()
spreadsheets_client.ClientLogin(email, password)

# ...
file_path = file_path.strip() + ".xls"
docs_token = gd_client.auth_token
gd_client.SetClientLoginToken(spreadsheets_client.GetClientLoginToken())
gd_client.Export(entry, file_path)
gd_client.auth_token = docs_token
I've simplified @Cameron's answer even further by removing the unnecessary object orientation. This makes the code smaller and easier to understand. I also edited the URL, which might work better.
#!/usr/bin/python

import re, urllib, urllib2

def get_auth_token(email, password):
    url = "https://www.google.com/accounts/ClientLogin"
    params = {
        "Email": email, "Passwd": password,
        "service": 'wise',
        "accountType": "HOSTED_OR_GOOGLE",
        "source": 'Client'
    }
    req = urllib2.Request(url, urllib.urlencode(params))
    return re.findall(r"Auth=(.*)", urllib2.urlopen(req).read())[0]

def download(spreadsheet, worksheet, email, password, format="csv"):
    # gid must be a query parameter; a '#gid=...' fragment is never sent to the server
    url_format = 'https://docs.google.com/spreadsheets/d/%s/export?format=%s&gid=%s'
    headers = {
        "Authorization": "GoogleLogin auth=" + get_auth_token(email, password),
        "GData-Version": "3.0"
    }
    req = urllib2.Request(url_format % (spreadsheet, format, worksheet), headers=headers)
    return urllib2.urlopen(req)

if __name__ == "__main__":
    import getpass
    import csv

    spreadsheet_id = ""  # (spreadsheet id here)
    worksheet_id = ''    # (gid here)
    email = ""           # (your email here)
    password = getpass.getpass()

    # Request a file-like object containing the spreadsheet's contents
    csv_file = download(spreadsheet_id, worksheet_id, email, password)

    # Parse as CSV and print the rows
    for row in csv.reader(csv_file):
        print ", ".join(row)
I'm using this on a sheet that is set to be publicly readable:
curl 'https://docs.google.com/spreadsheets/d/1-lqLuYJyHAKix-T8NR8wV8ZUUbVOJrZTysccid2-ycs/gviz/tq?tqx=out:csv'
So you would need a Python version of curl, if you can work with public sheets.
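If you'd rather stay in Python than shell out to curl, the requests library does the same job. A minimal sketch using the same sheet ID as the curl example above:

import csv
import io
import requests

url = ('https://docs.google.com/spreadsheets/d/'
       '1-lqLuYJyHAKix-T8NR8wV8ZUUbVOJrZTysccid2-ycs/gviz/tq?tqx=out:csv')
r = requests.get(url)
r.raise_for_status()  # fails loudly if the sheet is not publicly readable
for row in csv.reader(io.StringIO(r.text)):
    print(row)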
If you have a sheet with some tabs you don't want to reveal, create a new sheet, and import the ranges you want to publish into tabs on it.
Downloading a spreadsheet from Google Docs is pretty simple using gsheets.
You can follow the detailed documentation at
https://pypi.org/project/gsheets/
or follow the steps given below. I recommend reading through the documentation for better coverage.
pip install gsheets
Log in to the Google Developers Console with the Google account whose spreadsheets you want to access. Create (or select) a project and enable the Drive API and Sheets API (under Google Apps APIs).
Go to the Credentials for your project and create New credentials > OAuth client ID > of type Other. In the list of your OAuth 2.0 client IDs click Download JSON for the Client ID you just created. Save the file as client_secrets.json in your home directory (user directory).
Use the following code snippet.
from gsheets import Sheets

sheets = Sheets.from_files('client_secrets.json')
print(sheets)  # verifies the authenticated connection

s = sheets.get("{SPREADSHEET_URL}")
print(s)  # verifies your file is accessible

s.sheets[1].to_csv('Spam.csv', encoding='utf-8', dialect='excel')  # downloads the worksheet as CSV
This isn't a complete answer, but Andreas Kahler wrote up an interesting CMS solution using Google Docs + Google App Engine + Python. Not having any experience in the area, I cannot see exactly what portion of the code may be of use to you, but check it out. I know it interfaces with a Google Docs account and plays with files, so I have a feeling you'll recognize what's going on. It should at least point you in the right direction.
Google AppEngine + Google Docs + Some Python = Simple CMS
Gspread is indeed a big improvement over GoogleCL and Gdata (both of which I've used and thankfully phased out in favor of Gspread). I think this code is an even quicker way than the earlier answers to get the contents of the sheet:
import gspread

username = 'sdfsdfsds@gmail.com'
password = 'sdfsdfsadfsdw'
sheetname = "Sheety Sheet"

client = gspread.login(username, password)
spreadsheet = client.open(sheetname)
worksheet = spreadsheet.sheet1

contents = []
for rows in worksheet.get_all_values():
    contents.append(rows)
(Mar 2019, Python 3) My data is usually not sensitive, and I usually use a table format similar to CSV.
In such a case, one can simply publish the sheet to the web and then use it as a CSV file on a server.
(One publishes it using File -> Publish to the web ... -> Sheet 1 -> Comma separated values (.csv) -> Publish.)
import csv
import io

import requests

url = "https://docs.google.com/spreadsheets/d/e/<GOOGLE_ID>/pub?gid=0&single=true&output=csv"  # you can get the whole link in the 'Publish to the web' dialog
r = requests.get(url)
r.encoding = 'utf-8'
csvio = io.StringIO(r.text, newline="")
data = []
for row in csv.DictReader(csvio):
    data.append(row)
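If pandas is available, the same published CSV can also be loaded in a single call (a sketch, assuming pandas is installed; the URL is the same placeholder as above):

import pandas as pd

url = "https://docs.google.com/spreadsheets/d/e/<GOOGLE_ID>/pub?gid=0&single=true&output=csv"
df = pd.read_csv(url)  # pandas fetches the URL and parses the CSV directly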
Got a shared link from OneDrive, hosted by someone that's not me: https://1drv.ms/v/s!AjonToUPWqXmgjO3RqDbhRaSMrOM?e=69dccx
While investigating a solution that would enable downloading that file from OneDrive programmatically with Python, I stumbled across Office365-REST-Python-Client.
On their GitHub page they've got a specific section dedicated to OneDrive and also some examples of what one can do with it, one of which closely matches the desired goal.
Installed it using
pip install Office365-REST-Python-Client
but after comparing the code with the example that closely matched my goal, realized I had to run instead
pip install office365-rest-client
Changed the initial part to
import os
import tempfile
from office365.graph.graph_client import GraphClient
from office365.settings import settings
which required creating a file in the root with the name settings.py
import os

secure_vars = os.environ['office365_python_sdk_securevars'].split(';')
tenant = os.environ.get('office365_python_sdk_tenant', 'mediadev8')

settings = {
    'url': 'https://{tenant}.sharepoint.com/'.format(tenant=tenant),
    'tenant': '{tenant}.onmicrosoft.com'.format(tenant=tenant),
    'redirect_url': 'https://github.com/vgrem/Office365-REST-Python-Client/',
    'user_credentials': {
        'username': secure_vars[0],
        'password': secure_vars[1]
    },
    'client_credentials': {
        'client_id': secure_vars[2],
        'client_secret': secure_vars[3],
    }
}
Then, adapted the function get_token() to
def get_token(auth_ctx):
    """Acquire token via client credential flow (ADAL Python library is utilized)
    :type auth_ctx: adal.AuthenticationContext
    """
    url = 'https://1drv.ms/v/s!AjonToUPWqXmgjO3RqDbhRaSMrOM?e=69dccx'
    username = 'email'
    password = 'password'
    token = auth_ctx.acquire_token_for_user(url, username, password)
    return token
So, this is the current code
import os
import tempfile

from office365.graph.graph_client import GraphClient
from office365.settings import settings

def get_token(auth_ctx):
    """Acquire token via client credential flow (ADAL Python library is utilized)
    :type auth_ctx: adal.AuthenticationContext
    """
    url = 'https://1drv.ms/v/s!AjonToUPWqXmgjO3RqDbhRaSMrOM?e=69dccx'
    username = 'email'  # with my email
    password = 'password'  # with my password
    token = auth_ctx.acquire_token_for_user(url, username, password)
    return token

def download_files(remote_folder, local_path):
    drive_items = remote_folder.children
    client.load(drive_items)
    client.execute_query()
    for drive_item in drive_items:
        if not drive_item.file.is_server_object_null:  # is file?
            # download file content
            with open(os.path.join(local_path, drive_item.name), 'wb') as local_file:
                drive_item.download(local_file)
                client.execute_query()
            print("File '{0}' has been downloaded".format(local_file.name))

# example demonstrates how to export OneDrive files into local file system
# connect
client = GraphClient(settings['tenant'], get_token)

# load drive properties
drive = client.users["tiagomartinsperes@gmail.com"].drive
client.load(drive)
client.execute_query()

# download files from OneDrive
with tempfile.TemporaryDirectory() as path:
    download_files(drive.root, path)
print("Done")
Even though these adjustments fixed various problems, I still didn't manage to make it work.
When running the code, this is the error I'm getting:
File "C:\Users\tiago\Anaconda3\lib\site-packages\office365\settings.py", line 3, in <module>
    secure_vars = os.environ['office365_python_sdk_securevars'].split(';')
File "C:\Users\tiago\Anaconda3\lib\os.py", line 678, in __getitem__
    raise KeyError(key) from None
KeyError: 'office365_python_sdk_securevars'
I want to use Google's Cloud Vision API for OCR. Using the Python sample code here, we have:
import io

from google.cloud import vision
from google.cloud.vision import types

def detect_text(path):
    """Detects text in the file."""
    client = vision.ImageAnnotatorClient()

    with io.open(path, 'rb') as image_file:
        content = image_file.read()

    image = types.Image(content=content)
    response = client.text_detection(image=image)
    texts = response.text_annotations
    print('Texts:')

    for text in texts:
        print('\n"{}"'.format(text.description))
        vertices = (['({},{})'.format(vertex.x, vertex.y)
                     for vertex in text.bounding_poly.vertices])
        print('bounds: {}'.format(','.join(vertices)))
Where do I put my API key? I (obviously) can't authenticate without it.
From the docs,
If you plan to use a service account with client library code, you need to set an environment variable.
Provide the credentials to your application code by setting the environment variable GOOGLE_APPLICATION_CREDENTIALS. Replace [PATH] with the location of the JSON file you downloaded in the previous step.
For example:
export GOOGLE_APPLICATION_CREDENTIALS="/home/user/Downloads/service-account-file.json"
So it looks like you should create a service account, download a credentials file, and set an environment variable to point to it.
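For example, from Python itself before constructing the client (a minimal sketch; the path is a placeholder):

import os
from google.cloud import vision

os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/path/to/service-account-file.json'
client = vision.ImageAnnotatorClient()  # picks up the credentials from the environment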
There are two ways you can authenticate.
Exporting the credentials file as an environment variable. Here is some sample code:
import os

from google.cloud import vision

def get_text_from_image(image_file):
    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = "./creds/xxxx-xxxxx.json"
    try:
        # process_image_to_bytes is a method to convert a numpy array to a bytestream
        # (not of interest in this context, hence not included here)
        byte_img = process_image_to_bytes(image_file)
        client = vision.ImageAnnotatorClient()
        image = vision.Image(content=byte_img)
        response = client.text_detection(image=image)
        texts = response.text_annotations
        return texts
    except BaseException as e:
        print(str(e))
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] is doing the work here
Using oauth2
from google.cloud import vision
from google.oauth2 import service_account

def get_text_from_image(image_file):
    creds = service_account.Credentials.from_service_account_file("./creds/xxx-xxxxx.json")
    try:
        # process_image_to_bytes is a method to convert a numpy array to a bytestream
        # (not of interest in this context, hence not included here)
        byte_img = process_image_to_bytes(image_file)
        client = vision.ImageAnnotatorClient(credentials=creds)
        image = vision.Image(content=byte_img)
        response = client.text_detection(image=image)
        texts = response.text_annotations
        return texts
    except BaseException as e:
        print(str(e))
Here we are using the google-auth library to create a credentials object from the JSON key file and passing that object to the ImageAnnotatorClient for authentication.
Hope these sample snippets helped you.
I'm using Python 2.6 and the Google API client library, which I am trying to use to get authenticated access to email settings:
f = file(SERVICE_ACCOUNT_PKCS12_FILE_PATH, 'rb')
key = f.read()
f.close()

credentials = client.SignedJwtAssertionCredentials(
    SERVICE_ACCOUNT_EMAIL, key,
    scope='https://apps-apis.google.com/a/feeds/emailsettings/2.0/',
    sub=user_email)
http = httplib2.Http()
http = credentials.authorize(http)

return discovery.build('email-settings', 'v2', http=http)
When I execute this code, I get the following error:
UnknownApiNameOrVersion: name: email-settings version: v2
What's the API name and version for Email Settings v2?
Is it possible to use it with a service account?
Regards
I found the solution to get email settings using a service account and OAuth2.
Here is an example:
SERVICE_ACCOUNT_EMAIL = ''
SERVICE_ACCOUNT_PKCS12_FILE_PATH = ''
EMAIL_SETTING_URI = "https://apps-apis.google.com/a/feeds/emailsettings/2.0/%s/%s/%s"

def fctEmailSettings():
    user_email = "user@mail.com"

    f = file(SERVICE_ACCOUNT_PKCS12_FILE_PATH, 'rb')
    key = f.read()
    f.close()

    credentials = client.SignedJwtAssertionCredentials(
        SERVICE_ACCOUNT_EMAIL, key,
        scope='https://apps-apis.google.com/a/feeds/emailsettings/2.0/',
        sub=user_email)
    auth2token = OAuth2TokenFromCredentials(credentials)

    ESclient = EmailSettingsClient(domain='domain.com')
    auth2token.authorize(ESclient)

    username = 'username'
    setting = 'forwarding'
    uri = ESclient.MakeEmailSettingsUri(username, setting)
    entry = ESclient.get_entry(uri=uri, desired_class=GS.gdata.apps.emailsettings.data.EmailSettingsEntry)
It appears that the Email Settings API is not available via the Discovery API. The APIs Discovery Service returns the details of an API - what methods are available, etc.
See the following issue raised on the PHP client API:
https://github.com/google/google-api-php-client/issues/246
I'm unclear as to why the email settings API is not available via the Discovery API, or whether there are plans to make it so. Really, it feels like a lot of these systems and libraries are unmaintained.
The deprecated gdata client library does have support. Try the following example, which I can confirm works ok.
https://code.google.com/p/gdata-python-client/source/browse/samples/apps/emailsettings_example.py
In case you have multiple entry points in your app that need to access the EmailSettings API, here's a re-usable function that returns a "client" object:
def google_get_emailsettings_credentials():
    '''
    Google's EmailSettings API is not yet service-based, so delegation data
    has to be accessed differently from our other Google functions.
    TODO: Refactor when API is updated.
    '''
    with open(settings.GOOGLE_PATH_TO_KEYFILE) as f:
        private_key = f.read()

    client = EmailSettingsClient(domain='example.com')
    credentials = SignedJwtAssertionCredentials(
        settings.GOOGLE_CLIENT_EMAIL,
        private_key,
        scope='https://apps-apis.google.com/a/feeds/emailsettings/2.0/',
        sub=settings.GOOGLE_SUB_USER)
    auth2token = gdata.gauth.OAuth2TokenFromCredentials(credentials)
    auth2token.authorize(client)
    return client
It can then be called from elsewhere, e.g. to reach the DelegationFeed:
client = google_get_emailsettings_credentials()
uri = client.MakeEmailSettingsUri(username, 'delegation')
delegates_xml = client.get_entry(
    uri=uri,
    desired_class=gdata.apps.emailsettings.data.EmailSettingsDelegationFeed)
I have created an S3 bucket, uploaded a video, and created a streaming distribution in CloudFront. Tested it with a static HTML player and it works. I have created a keypair through the account settings. I have the private key file sitting on my desktop at the moment. That's where I am.
My aim is to get to a point where my Django/Python site creates secure URLs and people can't access the videos unless they've come from one of my pages. The problem is I'm allergic to the way Amazon has laid things out and I'm just getting more and more confused.
I realise this isn't going to be the best question on StackOverflow, but I'm certain I can't be the only fool out here who can't make heads or tails of how to set up a secure CloudFront/S3 situation. I would really appreciate your help and am willing (once two days have passed) to give a 500pt bounty to the best answer.
I have several questions that, once answered, should fit into one explanation of how to accomplish what I'm after:
In the documentation (there's an example in the next point) there's lots of XML lying around telling me I need to POST things to various places. Is there an online console for doing this? Or do I literally have to force this up via cURL (et al)?
How do I create an Origin Access Identity for CloudFront and bind it to my distribution? I've read this document but, per the first point, don't know what to do with it. How does my keypair fit into this?
Once that's done, how do I limit the S3 bucket to only allow people to download things through that identity? If this is another XML jobby rather than clicking around the web UI, please tell me where and how I'm supposed to get this into my account.
In Python, what's the easiest way of generating an expiring URL for a file? I have boto installed but I don't see how to get a file from a streaming distribution.
Are there any applications or scripts that can take the difficulty out of setting this garb up? I use Ubuntu (Linux) but I have XP in a VM if it's Windows-only. I've already looked at CloudBerry S3 Explorer Pro - but it makes about as much sense as the online UI.
You're right, it takes a lot of API work to get this set up. I hope they get it in the AWS Console soon!
UPDATE: I have submitted this code to boto - as of boto v2.1 (released 2011-10-27) this gets much easier. For boto < 2.1, use the instructions here. For boto 2.1 or greater, get the updated instructions on my blog: http://www.secretmike.com/2011/10/aws-cloudfront-secure-streaming.html Once boto v2.1 gets packaged by more distros I'll update the answer here.
To accomplish what you want you need to perform the following steps which I will detail below:
Create your s3 bucket and upload some objects (you've already done this)
Create a Cloudfront "Origin Access Identity" (basically an AWS account to allow cloudfront to access your s3 bucket)
Modify the ACLs on your objects so that only your Cloudfront Origin Access Identity is allowed to read them (this prevents people from bypassing Cloudfront and going direct to s3)
Create a cloudfront distribution with basic URLs and one which requires signed URLs
Test that you can download objects from basic cloudfront distribution but not from s3 or the signed cloudfront distribution
Create a key pair for signing URLs
Generate some URLs using Python
Test that the signed URLs work
1 - Create Bucket and upload object
The easiest way to do this is through the AWS Console but for completeness I'll show how using boto. Boto code is shown here:
import boto
#credentials stored in environment AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
s3 = boto.connect_s3()
#bucket name MUST follow dns guidelines
new_bucket_name = "stream.example.com"
bucket = s3.create_bucket(new_bucket_name)
object_name = "video.mp4"
key = bucket.new_key(object_name)
key.set_contents_from_filename(object_name)
2 - Create a Cloudfront "Origin Access Identity"
For now, this step can only be performed using the API. Boto code is here:
import boto
#credentials stored in environment AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
cf = boto.connect_cloudfront()
oai = cf.create_origin_access_identity(comment='New identity for secure videos')
#We need the following two values for later steps:
print("Origin Access Identity ID: %s" % oai.id)
print("Origin Access Identity S3CanonicalUserId: %s" % oai.s3_user_id)
3 - Modify the ACLs on your objects
Now that we've got our special S3 user account (the S3CanonicalUserId we created above), we need to give it access to our s3 objects. We can do this easily using the AWS Console by opening the object's (not the bucket's!) Permissions tab, clicking the "Add more permissions" button, and pasting the very long S3CanonicalUserId we got above into the "Grantee" field of a new permission. Make sure you give the new permission "Open/Download" rights.
You can also do this in code using the following boto script:
import boto
#credentials stored in environment AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
s3 = boto.connect_s3()
bucket_name = "stream.example.com"
bucket = s3.get_bucket(bucket_name)
object_name = "video.mp4"
key = bucket.get_key(object_name)
#Now add read permission to our new s3 account
s3_canonical_user_id = "<your S3CanonicalUserID from above>"
key.add_user_grant("READ", s3_canonical_user_id)
4 - Create a cloudfront distribution
Note that custom origins and private distributions are not fully supported in boto until version 2.0, which has not been formally released at the time of writing. The code below pulls out some code from the boto 2.0 branch and hacks it together to get it going, but it's not pretty. The 2.0 branch handles this much more elegantly - definitely use that if possible!
import boto
from boto.cloudfront.distribution import DistributionConfig
from boto.cloudfront.exception import CloudFrontServerError
import re

def get_domain_from_xml(xml):
    results = re.findall("<DomainName>([^<]+)</DomainName>", xml)
    return results[0]

# custom class to hack this until boto v2.0 is released
class HackedStreamingDistributionConfig(DistributionConfig):

    def __init__(self, connection=None, origin='', enabled=False,
                 caller_reference='', cnames=None, comment='',
                 trusted_signers=None):
        DistributionConfig.__init__(self, connection=connection,
                                    origin=origin, enabled=enabled,
                                    caller_reference=caller_reference,
                                    cnames=cnames, comment=comment,
                                    trusted_signers=trusted_signers)

    # override the to_xml() function
    def to_xml(self):
        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
        s += '<StreamingDistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
        s += '  <S3Origin>\n'
        s += '    <DNSName>%s</DNSName>\n' % self.origin
        if self.origin_access_identity:
            val = self.origin_access_identity
            s += '    <OriginAccessIdentity>origin-access-identity/cloudfront/%s</OriginAccessIdentity>\n' % val
        s += '  </S3Origin>\n'
        s += '  <CallerReference>%s</CallerReference>\n' % self.caller_reference
        for cname in self.cnames:
            s += '  <CNAME>%s</CNAME>\n' % cname
        if self.comment:
            s += '  <Comment>%s</Comment>\n' % self.comment
        s += '  <Enabled>'
        if self.enabled:
            s += 'true'
        else:
            s += 'false'
        s += '</Enabled>\n'
        if self.trusted_signers:
            s += '<TrustedSigners>\n'
            for signer in self.trusted_signers:
                if signer == 'Self':
                    s += '  <Self/>\n'
                else:
                    s += '  <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
            s += '</TrustedSigners>\n'
        if self.logging:
            s += '<Logging>\n'
            s += '  <Bucket>%s</Bucket>\n' % self.logging.bucket
            s += '  <Prefix>%s</Prefix>\n' % self.logging.prefix
            s += '</Logging>\n'
        s += '</StreamingDistributionConfig>\n'
        return s

    def create(self):
        response = self.connection.make_request('POST',
            '/%s/%s' % ("2010-11-01", "streaming-distribution"),
            {'Content-Type': 'text/xml'},
            data=self.to_xml())
        body = response.read()
        if response.status == 201:
            return body
        else:
            raise CloudFrontServerError(response.status, response.reason, body)
cf = boto.connect_cloudfront()
s3_dns_name = "stream.example.com.s3.amazonaws.com"
comment = "example streaming distribution"
oai = "<OAI ID from step 2 above like E23KRHS6GDUF5L>"
#Create a distribution that does NOT need signed URLS
hsd = HackedStreamingDistributionConfig(connection=cf, origin=s3_dns_name, comment=comment, enabled=True)
hsd.origin_access_identity = oai
basic_dist = hsd.create()
print("Distribution with basic URLs: %s" % get_domain_from_xml(basic_dist))
#Create a distribution that DOES need signed URLS
hsd = HackedStreamingDistributionConfig(connection=cf, origin=s3_dns_name, comment=comment, enabled=True)
hsd.origin_access_identity = oai
#Add some required signers (Self means your own account)
hsd.trusted_signers = ['Self']
signed_dist = hsd.create()
print("Distribution with signed URLs: %s" % get_domain_from_xml(signed_dist))
5 - Test that you can download objects from cloudfront but not from s3
You should now be able to verify:
stream.example.com.s3.amazonaws.com/video.mp4 - should give AccessDenied
signed_distribution.cloudfront.net/video.mp4 - should give MissingKey (because the URL is not signed)
basic_distribution.cloudfront.net/video.mp4 - should work fine
The tests will have to be adjusted to work with your stream player, but the basic idea is that only the basic cloudfront url should work.
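A quick way to run those checks from Python (a sketch; the hostnames are the placeholders used throughout this answer):

import urllib2

urls = [
    "http://stream.example.com.s3.amazonaws.com/video.mp4",   # expect AccessDenied
    "http://signed_distribution.cloudfront.net/video.mp4",    # expect MissingKey
    "http://basic_distribution.cloudfront.net/video.mp4",     # expect 200
]
for url in urls:
    try:
        print("%s -> %s" % (url, urllib2.urlopen(url).getcode()))
    except urllib2.HTTPError as e:
        print("%s -> HTTP %s" % (url, e.code))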
6 - Create a keypair for CloudFront
I think the only way to do this is through Amazon's web site. Go into your AWS "Account" page and click on the "Security Credentials" link. Click on the "Key Pairs" tab then click "Create a New Key Pair". This will generate a new key pair for you and automatically download a private key file (pk-xxxxxxxxx.pem). Keep the key file safe and private. Also note down the "Key Pair ID" from amazon as we will need it in the next step.
7 - Generate some URLs in Python
As of boto version 2.0 there does not seem to be any support for generating signed CloudFront URLs. Python does not include RSA encryption routines in the standard library so we will have to use an additional library. I've used M2Crypto in this example.
For a non-streaming distribution, you must use the full cloudfront URL as the resource; for streaming, however, we only use the object name of the video file. See the code below for a full example of generating a URL which only lasts for 5 minutes.
This code is based loosely on the PHP example code provided by Amazon in the CloudFront documentation.
from M2Crypto import EVP
import base64
import time

def aws_url_base64_encode(msg):
    msg_base64 = base64.b64encode(msg)
    msg_base64 = msg_base64.replace('+', '-')
    msg_base64 = msg_base64.replace('=', '_')
    msg_base64 = msg_base64.replace('/', '~')
    return msg_base64

def sign_string(message, priv_key_string):
    key = EVP.load_key_string(priv_key_string)
    key.reset_context(md='sha1')
    key.sign_init()
    key.sign_update(str(message))
    signature = key.sign_final()
    return signature

def create_url(url, encoded_signature, key_pair_id, expires):
    signed_url = "%(url)s?Expires=%(expires)s&Signature=%(encoded_signature)s&Key-Pair-Id=%(key_pair_id)s" % {
        'url': url,
        'expires': expires,
        'encoded_signature': encoded_signature,
        'key_pair_id': key_pair_id,
    }
    return signed_url

def get_canned_policy_url(url, priv_key_string, key_pair_id, expires):
    # we manually construct this policy string to ensure formatting matches signature
    canned_policy = '{"Statement":[{"Resource":"%(url)s","Condition":{"DateLessThan":{"AWS:EpochTime":%(expires)s}}}]}' % {'url': url, 'expires': expires}

    # now base64 encode it (must be URL safe)
    encoded_policy = aws_url_base64_encode(canned_policy)

    # sign the non-encoded policy
    signature = sign_string(canned_policy, priv_key_string)

    # now base64 encode the signature (URL safe as well)
    encoded_signature = aws_url_base64_encode(signature)

    # combine these into a full url
    signed_url = create_url(url, encoded_signature, key_pair_id, expires)
    return signed_url

def encode_query_param(resource):
    enc = resource
    enc = enc.replace('?', '%3F')
    enc = enc.replace('=', '%3D')
    enc = enc.replace('&', '%26')
    return enc

# Set parameters for URL
key_pair_id = "APKAIAZCZRKVIO4BQ"    # from the AWS accounts page
priv_key_file = "cloudfront-pk.pem"  # your private keypair file
resource = 'video.mp4'               # your resource (just the object name for streaming videos)
expires = int(time.time()) + 300     # 5 min

# Create the signed URL
priv_key_string = open(priv_key_file).read()
signed_url = get_canned_policy_url(resource, priv_key_string, key_pair_id, expires)

# Flash player doesn't like query params so encode them
enc_url = encode_query_param(signed_url)
print(enc_url)
8 - Try out the URLs
Hopefully you should now have a working URL which looks something like this:
video.mp4%3FExpires%3D1309979985%26Signature%3DMUNF7pw1689FhMeSN6JzQmWNVxcaIE9mk1x~KOudJky7anTuX0oAgL~1GW-ON6Zh5NFLBoocX3fUhmC9FusAHtJUzWyJVZLzYT9iLyoyfWMsm2ylCDBqpy5IynFbi8CUajd~CjYdxZBWpxTsPO3yIFNJI~R2AFpWx8qp3fs38Yw_%26Key-Pair-Id%3DAPKAIAZRKVIO4BQ
Put this into your js and you should have something which looks like this (from the PHP example in Amazon's CloudFront documentation):
var so_canned = new SWFObject('http://location.domname.com/~jvngkhow/player.swf','mpl','640','360','9');
so_canned.addParam('allowfullscreen','true');
so_canned.addParam('allowscriptaccess','always');
so_canned.addParam('wmode','opaque');
so_canned.addVariable('file','video.mp4%3FExpires%3D1309979985%26Signature%3DMUNF7pw1689FhMeSN6JzQmWNVxcaIE9mk1x~KOudJky7anTuX0oAgL~1GW-ON6Zh5NFLBoocX3fUhmC9FusAHtJUzWyJVZLzYT9iLyoyfWMsm2ylCDBqpy5IynFbi8CUajd~CjYdxZBWpxTsPO3yIFNJI~R2AFpWx8qp3fs38Yw_%26Key-Pair-Id%3DAPKAIAZRKVIO4BQ');
so_canned.addVariable('streamer','rtmp://s3nzpoyjpct.cloudfront.net/cfx/st');
so_canned.write('canned');
Summary
As you can see, not very easy! boto v2 will help a lot setting up the distribution. I will find out if it's possible to get some URL generation code in there as well to improve this great library!
In Python, what's the easiest way of generating an expiring URL for a file? I have boto installed but I don't see how to get a file from a streaming distribution.
You can generate an expiring signed URL for the resource. The Boto3 documentation has a nice example solution for that:
import datetime

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from botocore.signers import CloudFrontSigner

def rsa_signer(message):
    with open('path/to/key.pem', 'rb') as key_file:
        private_key = serialization.load_pem_private_key(
            key_file.read(),
            password=None,
            backend=default_backend()
        )
    # Note: recent versions of the cryptography library removed the
    # private_key.signer() API used in older examples; sign() replaces it.
    return private_key.sign(message, padding.PKCS1v15(), hashes.SHA1())

key_id = 'AKIAIOSFODNN7EXAMPLE'
url = 'http://d2949o5mkkp72v.cloudfront.net/hello.txt'
expire_date = datetime.datetime(2017, 1, 1)

cloudfront_signer = CloudFrontSigner(key_id, rsa_signer)

# Create a signed url that will be valid until the specific expiry date
# provided, using a canned policy.
signed_url = cloudfront_signer.generate_presigned_url(
    url, date_less_than=expire_date)
print(signed_url)
Can a Python script upload a photo to Photobucket and then retrieve the URL for it? If so, how?
I found a script at this link: http://www.democraticunderground.com/discuss/duboard.php?az=view_all&address=240x677
But I just found that confusing.
Many thanks,
Phil
Yes, you can. Photobucket has a well-documented API, and someone wrote a Python wrapper around it.
Download it and put it into your Python path, then install httplib2 (you can use easy_install or pip for this one).
Then, you have to request a key for the Photobucket API.
If you did everything right, you can write your script now. The Python wrapper is great, but it is not documented at all, which makes it very difficult to use. I spent hours understanding it (compare the question and response time here). For example, the script even has form/multipart support, but I had to read the code to find out how to use it. I had to prefix the filename with a #.
This library is a great example of how you should NOT document your code!
I finally got it working; enjoy the script (it even has OAuth handling!):
import pbapi
import webbrowser
import cPickle
import os
import re
import sys
from xml.etree import ElementTree

__author__ = "leoluk"

###############################################
##              CONFIGURATION                ##
###############################################

# File in which the oAuth token will be stored
TOKEN_FILE = "token.txt"

IMAGE_PATH = r"D:\Eigene Dateien\Bilder\SC\foo.png"

IMAGE_RECORD = {
    "type": 'image',
    "uploadfile": '#' + IMAGE_PATH,
    "title": "My title",  # <---
    "description": "My description",  # <---
}

ALBUM_NAME = None  # default album if None

API_KEY = "149[..]"
API_SECRET = "528[...]"

###############################################
##                  SCRIPT                   ##
###############################################

api = pbapi.PbApi(API_KEY, API_SECRET)
api.pb_request.connection.cache = None

# Test if service online
api.reset().ping().post()
result = api.reset().ping().post().response_string

ET = ElementTree.fromstring(result)
if ET.find('status').text != 'OK':
    sys.stderr.write("error: Ping failed \n" + result)
    sys.exit(-1)

try:
    # If there is already a saved oAuth token, no need for a new one
    api.username, api.pb_request.oauth_token = cPickle.load(open(TOKEN_FILE))
except (ValueError, KeyError, IOError, TypeError):
    # If error, there's no valid oAuth token

    # Getting request token
    api.reset().login().request().post().load_token_from_response()

    # Requesting user permission (you have to login with your account)
    webbrowser.open_new_tab(api.login_url)
    raw_input("Press Enter when you finished access permission. ")

    # Getting oAuth token
    api.reset().login().access().post().load_token_from_response()

# This is needed for getting the right subdomain
infos = api.reset().album(api.username).url().get().response_string
ET = ElementTree.fromstring(infos)

if ET.find('status').text != 'OK':
    # Remove the invalid oAuth
    os.remove(TOKEN_FILE)
    # This happens if the user deletes the oAuth permission online
    sys.stderr.write("error: Permission deleted. Please re-run.")
    sys.exit(-1)

# Fresh values for username and subdomain
api.username = ET.find('content/username').text
api.set_subdomain(ET.find('content/subdomain/api').text)

# Default album name
if not ALBUM_NAME:
    ALBUM_NAME = api.username

# Debug :-)
print "User: %s" % api.username

# Save the new, valid oAuth token
cPickle.dump((api.username, api.oauth_token), open(TOKEN_FILE, 'w'))

# Posting the image
result = (api.reset().album(ALBUM_NAME).
          upload(IMAGE_RECORD).post().response_string)

ET = ElementTree.fromstring(result)
if ET.find('status').text != 'OK':
    sys.stderr.write("error: File upload failed \n" + result)
    sys.exit(-1)

# Now, as an example of what you could do, open the image in the browser
webbrowser.open_new_tab(ET.find('content/browseurl').text)
Use the Python API by Ron White that was written to do just this.