Importing an image from Python (raspberry pi) to Firebase - python

For a project in which we created an app that records certain scores throughout the day, we also created some graphs in R, which we saved as JPEGs on the Raspberry Pi.
We want to upload the JPEG to Firebase Storage via Python (we already uploaded a variable to Firebase and that worked).
We first tried this code:
from google.cloud import storage
client = storage.Client()
bucket = client.get_bucket('teddy-aztech-ehealth.appspot.com')
graphicBlob = bucket.get_blob('graph.jpeg')
graphicBlob.upload_from_filename(filename='/home/pi/graph.jpeg')
But we get a long error from the get_bucket call, telling us the bucket name must start and end with a number.
We also tried this code:
import sys
import requests
import firebase_admin
from firebase_admin import credentials
from firebase_admin import storage
sys.argv = "/home/pi/graph.jpeg"
image_url = sys.argv
cred = credentials.Certificate('teddy-aztech-ehealth-firebase-adminsdk-t0iz1-61f49237f4.json')
firebase_admin.initialize_app(cred, {
'storageBucket': 'https://teddy-aztech-ehealth.appspot.com'
})
bucket = storage.bucket()
image_data = requests.get(image_url).content
blob = bucket.blob('graph.jpg')
blob.upload_from_string(
image_data,
content_type='image/jpg'
)
print(blob.public_url)
But we get an error at the initialize_app part (again, because of the bucket...).
Do we have to activate/grant access from Firebase?

Your initial attempt is close to what you need.
import datetime
from google.cloud import storage

# Google Cloud Project ID. This can be found on the 'Overview' page at
# https://console.developers.google.com
PROJECT_ID = 'your-project-id'
CLOUD_STORAGE_BUCKET = 'your-bucket-name'
filename = "graph-filename.jpeg"

# Create a unique filename to avoid name collisions in Google Cloud Storage
date = datetime.datetime.utcnow().strftime("%Y-%m-%d-%H%M%S")
basename, extension = filename.rsplit('.', 1)
unique_filename = "{0}-{1}.{2}".format(basename, date, extension)

# Instantiate a client on behalf of the project
client = storage.Client(project=PROJECT_ID)
# Instantiate a bucket
bucket = client.bucket(CLOUD_STORAGE_BUCKET)
# Instantiate a blob
blob = bucket.blob(unique_filename)

# Upload the file
with open(filename, "rb") as fp:
    blob.upload_from_file(fp)

# The public URL for this blob
url = blob.public_url
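As for the firebase_admin attempt, the initialize_app error is most likely caused by passing a URL as the storageBucket value; the Admin SDK expects the bare bucket name without the "https://" prefix. Here is a minimal sketch of that variant, assuming the same service-account file and local file path from the question:
import firebase_admin
from firebase_admin import credentials, storage

# Assumes the service-account JSON and the local file path from the question.
cred = credentials.Certificate('teddy-aztech-ehealth-firebase-adminsdk-t0iz1-61f49237f4.json')
firebase_admin.initialize_app(cred, {
    # Bare bucket name only -- no "https://" prefix.
    'storageBucket': 'teddy-aztech-ehealth.appspot.com'
})

bucket = storage.bucket()
blob = bucket.blob('graph.jpeg')
# Upload straight from the local file on the Pi; no need to fetch it over HTTP.
blob.upload_from_filename('/home/pi/graph.jpeg')
print(blob.public_url)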

Related

How can we upload multiple images using presigned_post url in s3

I am trying to upload multiple images to S3 from a React application using AWS API Gateway.
I have tried the approach below:
Set up an API Gateway endpoint that targets a Lambda function.
Lambda function code:
import json
import boto3

def lambda_handler(event, context):
    print(event)
    s3 = boto3.client('s3', region_name='us-east-1')
    bucket_name = 'testimagesbucketupload'
    URL = s3.generate_presigned_post(
        Bucket=bucket_name,
        Key="${filename}",
        # Conditions=[
        #     ["starts-with", "$success_action_redirect", ""],
        #     ["eq", "$userid", "test"],
        # ],
        ExpiresIn=3600)
    data = {"url": URL['url'], "fields": URL['fields']}
    print(type(data))
    # print(data)
    return data
Using the above code I am able to upload a single image from both the web and Postman, but now I want to upload multiple images using this URL, and I also want to retrieve the images for preview.
If anyone has done this, please help me.
Thanks in advance.
I tried presigned_post and presigned-url to achieve this, but I am still not able to.
You'll need to create one URL per image, but you can use a loop to create all of them. I think something like this could work for you:
import boto3

def lambda_handler(event, context):
    s3 = boto3.client('s3', region_name='us-east-1')
    bucket_name = 'testimagesbucketupload'
    image_list = event['image_list']
    data = []
    for image in image_list:
        URL = s3.generate_presigned_post(
            Bucket=bucket_name,
            Key=image,
            ExpiresIn=3600)
        data.append({"url": URL['url'], "fields": URL['fields']})
    return data
Note that you need to pass a list of images in the event
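For completeness, here is a hedged sketch of what the upload against one of those presigned POST entries could look like. It uses Python's requests for brevity (the React/fetch call has the same shape: every entry in "fields" goes into the form body, plus the file itself); the file name and the data list are placeholders taken from the answer above.
import requests

# Hypothetical example: take the first entry returned by the Lambda above
# (e.g. fetched through the API Gateway endpoint) and POST a local file to it.
entry = data[0]  # {"url": ..., "fields": {...}}
with open('photo1.jpg', 'rb') as f:
    files = {'file': ('photo1.jpg', f)}
    resp = requests.post(entry['url'], data=entry['fields'], files=files)
print(resp.status_code)  # 204 on success by default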
For the preview, you could use a presigned url to return the image as a public url...
from botocore.client import Config
import boto3

s3 = boto3.client('s3', config=Config(signature_version='s3v4'), region_name="your_region")
presigned_url = s3.generate_presigned_url(
    'get_object',
    Params={'Bucket': "your_bucket",
            'Key': "your_file_key"},
    ExpiresIn=3600)
presigned_url

How do I directly save images from a url to my aws bucket (python / boto3)? [duplicate]

I'm working in a Python web environment and I can simply upload a file from the filesystem to S3 using boto's key.set_contents_from_filename(path/to/file). However, I'd like to upload an image that is already on the web (say https://pbs.twimg.com/media/A9h_htACIAAaCf6.jpg:large).
Should I somehow download the image to the filesystem, and then upload it to S3 using boto as usual, then delete the image?
What would be ideal is if there is a way to get boto's key.set_contents_from_file or some other command that would accept a URL and nicely stream the image to S3 without having to explicitly download a file copy to my server.
def upload(url):
    try:
        conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
        bucket_name = settings.AWS_STORAGE_BUCKET_NAME
        bucket = conn.get_bucket(bucket_name)
        k = Key(bucket)
        k.key = "test"
        k.set_contents_from_file(url)
        k.make_public()
        return "Success?"
    except Exception, e:
        return e
Using set_contents_from_file, as above, I get a "string object has no attribute 'tell'" error. Using set_contents_from_filename with the URL, I get a No such file or directory error. The boto storage documentation leaves off at uploading local files and does not mention uploading files stored remotely.
Here is how I did it with requests, the key being to set stream=True when initially making the request, and to upload to S3 using the upload_fileobj() method:
import requests
import boto3
url = "https://upload.wikimedia.org/wikipedia/en/a/a9/Example.jpg"
r = requests.get(url, stream=True)
session = boto3.Session()
s3 = session.resource('s3')
bucket_name = 'your-bucket-name'
key = 'your-key-name' # key is the name of file on your bucket
bucket = s3.Bucket(bucket_name)
bucket.upload_fileobj(r.raw, key)
OK, from @garnaat, it doesn't sound like S3 currently allows uploads by URL. I managed to upload remote images to S3 by reading them into memory only. This works:
def upload(url):
    try:
        conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
        bucket_name = settings.AWS_STORAGE_BUCKET_NAME
        bucket = conn.get_bucket(bucket_name)
        k = Key(bucket)
        k.key = url.split('/')[::-1][0]  # In my situation, ids at the end are unique
        file_object = urllib2.urlopen(url)  # 'Like' a file object
        fp = StringIO.StringIO(file_object.read())  # Wrap object
        k.set_contents_from_file(fp)
        return "Success"
    except Exception, e:
        return e
Also thanks to How can I create a GzipFile instance from the “file-like object” that urllib.urlopen() returns?
For a 2017-relevant answer to this question which uses the official 'boto3' package (instead of the old 'boto' package from the original answer):
Python 3.5
If you're on a clean Python install, pip install both packages first:
pip install boto3
pip install requests
import boto3
import requests

# Uses the creds in ~/.aws/credentials
s3 = boto3.resource('s3')
bucket_name_to_upload_image_to = 'photos'
s3_image_filename = 'test_s3_image.png'
internet_image_url = 'https://docs.python.org/3.7/_static/py.png'

# Do this as a quick and easy check to make sure your S3 access is OK
good_to_go = False
for bucket in s3.buckets.all():
    if bucket.name == bucket_name_to_upload_image_to:
        print('Good to go. Found the bucket to upload the image into.')
        good_to_go = True

if not good_to_go:
    print('Not seeing your s3 bucket, might want to double check permissions in IAM')

# Given an Internet-accessible URL, download the image and upload it to S3,
# without needing to persist the image to disk locally
req_for_image = requests.get(internet_image_url, stream=True)
file_object_from_req = req_for_image.raw
req_data = file_object_from_req.read()

# Do the actual upload to s3
s3.Bucket(bucket_name_to_upload_image_to).put_object(Key=s3_image_filename, Body=req_data)
Unfortunately, there really isn't any way to do this. At least not at the moment. We could add a method to boto, say set_contents_from_url, but that method would still have to download the file to the local machine and then upload it. It might still be a convenient method but it wouldn't save you anything.
In order to do what you really want to do, we would need to have some capability on the S3 service itself that would allow us to pass it the URL and have it store the URL to a bucket for us. That sounds like a pretty useful feature. You might want to post that to the S3 forums.
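To make that point concrete, a hypothetical set_contents_from_url-style helper (not part of boto or boto3) would still boil down to a download followed by an upload, something like this sketch:
import boto3
import requests

def set_contents_from_url(bucket_name, key, url):
    # Hypothetical helper illustrating the point above: the client still has to
    # pull the bytes over HTTP itself before handing them to S3.
    resp = requests.get(url, stream=True)
    resp.raise_for_status()
    boto3.client('s3').upload_fileobj(resp.raw, bucket_name, key)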
A simple three-line implementation that works on a Lambda out of the box:
import boto3
import requests

s3_object = boto3.resource('s3').Object(bucket_name, object_key)
with requests.get(url, stream=True) as r:
    s3_object.put(Body=r.content)
The source for the .get part comes straight from the requests documentation
import boto3
import requests
from io import BytesIO

def send_image_to_s3(url, name):
    print("sending image")
    bucket_name = 'XXX'
    AWS_SECRET_ACCESS_KEY = "XXX"
    AWS_ACCESS_KEY_ID = "XXX"
    s3 = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY_ID,
                      aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
    response = requests.get(url)
    img = BytesIO(response.content)
    file_name = f'path/{name}'
    print('sending {}'.format(file_name))
    r = s3.upload_fileobj(img, bucket_name, file_name)
    s3_path = 'path/' + name
    return s3_path
I have tried the following with boto3 and it works for me:
import boto3
import contextlib
import requests
from io import BytesIO

s3 = boto3.resource('s3')
s3Client = boto3.client('s3')

for bucket in s3.buckets.all():
    print(bucket.name)

url = "#resource url"
with contextlib.closing(requests.get(url, stream=True, verify=False)) as response:
    # Set up file stream from response content.
    fp = BytesIO(response.content)
    # Upload data to S3
    s3Client.upload_fileobj(fp, 'aws-books', 'reviews_Electronics_5.json.gz')
Using the boto3 upload_fileobj method, you can stream a file to an S3 bucket, without saving to disk. Here is my function:
import boto3
import StringIO
import contextlib
import requests

def upload(url):
    # Get the service client
    s3 = boto3.client('s3')
    # Remember to set stream=True.
    with contextlib.closing(requests.get(url, stream=True, verify=False)) as response:
        # Set up file stream from response content.
        fp = StringIO.StringIO(response.content)
        # Upload data to S3
        s3.upload_fileobj(fp, 'my-bucket', 'my-dir/' + url.split('/')[-1])
S3 doesn't support remote upload as of now, it seems. You can use the class below for uploading an image to S3. The upload method here first downloads the image and keeps it in memory until it gets uploaded. To be able to connect to S3 you will have to install the AWS CLI using pip install awscli, then enter your credentials using aws configure:
import boto3
import urllib3
import uuid
from pathlib import Path
from io import BytesIO
from errors import custom_exceptions as cex

BUCKET_NAME = "xxx.yyy.zzz"
POSTERS_BASE_PATH = "assets/wallcontent"
CLOUDFRONT_BASE_URL = "https://xxx.cloudfront.net/"

class S3(object):
    def __init__(self):
        self.client = boto3.client('s3')
        self.bucket_name = BUCKET_NAME
        self.posters_base_path = POSTERS_BASE_PATH

    def __download_image(self, url):
        manager = urllib3.PoolManager()
        try:
            res = manager.request('GET', url)
        except Exception:
            print("Could not download the image from URL: ", url)
            raise cex.ImageDownloadFailed
        return BytesIO(res.data)  # any file-like object that implements read()

    def upload_image(self, url):
        try:
            image_file = self.__download_image(url)
        except cex.ImageDownloadFailed:
            raise cex.ImageUploadFailed

        extension = Path(url).suffix
        id = uuid.uuid1().hex + extension
        final_path = self.posters_base_path + "/" + id
        try:
            self.client.upload_fileobj(image_file,
                                       self.bucket_name,
                                       final_path)
        except Exception:
            print("Image Upload Error for URL: ", url)
            raise cex.ImageUploadFailed
        return CLOUDFRONT_BASE_URL + id
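A possible way to use the class above (the URL is just a placeholder, and configured AWS credentials are assumed):
# Hypothetical usage of the S3 class above.
uploader = S3()
cdn_url = uploader.upload_image("https://example.com/poster.jpg")
print(cdn_url)  # CloudFront URL of the uploaded image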
import boto
from boto.s3.key import Key
from boto.s3.connection import OrdinaryCallingFormat
from urllib import urlopen

def upload_images_s3(img_url):
    try:
        connection = boto.connect_s3('access_key', 'secret_key', calling_format=OrdinaryCallingFormat())
        bucket = connection.get_bucket('boto-demo-1519388451')
        file_obj = Key(bucket)
        file_obj.key = img_url.split('/')[::-1][0]
        fp = urlopen(img_url)
        result = file_obj.set_contents_from_string(fp.read())
    except Exception, e:
        return e

http post request to API with azure blob storage

I'm trying to make an HTTP POST request to Microsoft's Face API in order to connect it with photos in my Azure Blob Storage account. When I run the following code, I get multiple errors, such as handshake errors or SSL-routines-type errors. I appreciate any help! The problem code is:
api_response = requests.post(url, headers=headers, data=blob)
For context, here is what I ran before that. This first chunk sets up the storage account:
%matplotlib inline
import matplotlib.pyplot as plt
import io
from io import StringIO
import numpy as np
import cv2
from PIL import Image
import os
from array import array

azure_storage_account_name = 'musicsurveyphotostorage'
azure_storage_account_key = None  # don't need a key... we will access a public blob...
if azure_storage_account_name is None:
    raise Exception("You must provide a name for an Azure Storage account")

from azure.storage.blob import BlockBlobService
blob_service = BlockBlobService(azure_storage_account_name, azure_storage_account_key)

# select container (folder) name where the files reside
container_name = 'musicsurveyphotostorage'
# list files in the selected folder
generator = blob_service.list_blobs(container_name)
blob_prefix = 'https://{0}.blob.core.windows.net/{1}/{2}'

# load image file to process
blob_name = 'shiba.jpg'  # name of image I have stored
blob = blob_service.get_blob_to_bytes(container_name, blob_name)
image_file_in_mem = io.BytesIO(blob.content)
img_bytes = Image.open(image_file_in_mem)
This second chunk calls the API and contains the problematic POST request:
#CALL OUT THE API
import requests
import urllib

url_face_api = 'https://eastus.api.cognitive.microsoft.com/face/v1.0'
api_key = '____'

#WHICH PARAMETERS/ATTRIBUTES DO YOU WANT RETURNED
headers = {'Content-Type': 'application/octet-stream',
           'Ocp-Apim-Subscription-Key': api_key}
params = urllib.parse.urlencode({
    'returnFaceId': 'true',
    'returnFaceLandmarks': 'true',
    'returnFaceAttributes': 'age,gender,smile,facialHair,headPose,glasses',
})
query_string = '?{0}'.format(params)
url = url_face_api + query_string

#THIS IS THE PROBLEM CODE
api_response = requests.post(url, headers=headers, data=blob)

#print out output in json
import json
res_json = json.loads(api_response.content.decode('utf-8'))
print(json.dumps(res_json, indent=2, sort_keys=True))
If I open Fiddler, I can also reproduce the issue you mentioned. In that case, you could capture the request with Fiddler while it is being sent.
Based on my test, there are two lines in your code that need to be changed.
We can also get some demo code from the official Azure documentation.
url_face_api = 'https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect'  # in your case the '/detect' path segment is missing
api_response = requests.post(url, headers=headers, data=blob.content)  # data should be blob.content
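Putting the two fixes together, a sketch of the corrected call could look like the following. It reuses the headers and query_string already defined in the question; note that the region in the endpoint has to match the region of your subscription key.
# Consolidated sketch reusing the question's variables (headers, query_string, blob).
url_face_api = 'https://eastus.api.cognitive.microsoft.com/face/v1.0/detect'
url = url_face_api + query_string
api_response = requests.post(url, headers=headers, data=blob.content)
print(api_response.status_code)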

Upload raw JSON data on Google Cloud Storage using Python code

I am trying to upload raw JSON data to Google Cloud Storage, but I am getting this error:
TypeError: must be string or buffer, not dict
Code:
def upload_data(destination_path):
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('storage', 'v1', credentials=credentials)
    content = {'name': 'test'}
    media = http.MediaIoBaseUpload(StringIO(content), mimetype='plain/text')
    req = service.objects().insert(
        bucket=settings.GOOGLE_CLOUD_STORAGE_BUCKET,
        body={"cacheControl": "public,max-age=31536000"},
        media_body=media,
        predefinedAcl='publicRead',
        name=destination_path,
    )
    resp = req.execute()
    return resp
The code worked after changing StringIO(content) to StringIO(json.dumps(content)).
In your example, content is a dict. Perhaps you want to use json?
content = json.dumps({'name': 'test'})
To answer my own comment question on how to get this to work:
To get this to work you'll need
from googleapiclient.discovery import build
from googleapiclient import http
from oauth2client.client import GoogleCredentials
import io
Also, I needed to change this:
media = http.MediaIoBaseUpload(StringIO(content), mimetype='plain/text')
into this:
media = http.MediaIoBaseUpload(io.StringIO(json.dumps(content)), mimetype='plain/text')
Note the addition of 'io' alongside the json.dumps recommended by Corey Goldberg.
For predefinedAcl options you can go here:
https://cloud.google.com/storage/docs/access-control/lists#predefined-acl
The bucket needs to be the full name of your bucket. If you come from Firebase, it's "[name].appspot.com".
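Pulling those pieces together, a consolidated sketch of the working function might look like this; the bucket name is a placeholder, and the MIME type is an assumption (the original used 'plain/text'):
import io
import json
from googleapiclient.discovery import build
from googleapiclient import http
from oauth2client.client import GoogleCredentials

def upload_data(destination_path):
    credentials = GoogleCredentials.get_application_default()
    service = build('storage', 'v1', credentials=credentials)
    content = {'name': 'test'}
    # Serialise the dict first; MediaIoBaseUpload needs a file-like object.
    # (On Python 3 you may need io.BytesIO(json.dumps(content).encode()) instead.)
    media = http.MediaIoBaseUpload(io.StringIO(json.dumps(content)),
                                   mimetype='text/plain')
    req = service.objects().insert(
        bucket='your-project-id.appspot.com',  # placeholder: full bucket name
        body={"cacheControl": "public,max-age=31536000"},
        media_body=media,
        predefinedAcl='publicRead',
        name=destination_path,
    )
    return req.execute()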
