How to upload a local video to AWS S3 using boto3 and Flask (Python)

I am new to Flask.
I want to upload a video to AWS S3 using Flask. The filename changes every time a new file is uploaded.
There is also a subfolder in the bucket, like bucketname/videos; I want to upload into videos:
def uploadvideo():
    finalResult = 'abc.mp4'
    s3_bucket_video_url = 'media-storage-beta' + '/videos/' + 'video'
    s3 = boto3.client('s3')
    response = s3.upload_file(finalResult, 'media-storage-beta', 'video')
    return 'success'

You need to install boto3 in your virtualenv. It also depends on where you are running the Flask app: if you are uploading from a local machine, you need AWS access keys in the environment variables. You can define a route like this in Flask; it assumes a form with a videotitle field and a file upload field.
@app.route('/uploadvideo', methods=['POST'])
def uploadvideo():
    if request.method == 'POST':
        print(request.form['videotitle'])
        f_video = request.files['videofile']
        print(f_video.filename)
        # build the S3 key under the videos/ prefix
        s3_bucket_video_url = <Main_Folder> + '/videos/' + f_video.filename
        s3_client = boto3.client('s3')
        # upload_file expects a path on local disk, so this works only if the
        # file was saved there first; use upload_fileobj(f_video, ...) to
        # stream the request file directly instead
        response = s3_client.upload_file(
            f_video.filename, <BUCKET_NAME>, s3_bucket_video_url)
    return 'Success'
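On the credentials point: boto3 picks up AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY from the environment (or from ~/.aws/credentials) automatically. A minimal sketch if you prefer to pass them explicitly:

import os
import boto3

# equivalent to relying on the environment defaults, just explicit
s3_client = boto3.client(
    's3',
    aws_access_key_id=os.environ.get('AWS_ACCESS_KEY_ID'),
    aws_secret_access_key=os.environ.get('AWS_SECRET_ACCESS_KEY'),
)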
Thanks
Ashish

Related

Uploaded image on AWS S3 using boto has size 0 bytes (Python, Flask)

I am trying to upload an image to my bucket on AWS S3. Earlier it was working fine, but now the uploaded image has a size of 0 bytes. I have tried rolling back to previous versions of my project on GitHub, but nothing seems to work. I have been stuck on this issue for 2 days now.
import boto3
from flask_restful import Resource, reqparse
from werkzeug.datastructures import FileStorage

def upload_to_aws(local_file, bucket_name, s3_file):
    s3 = boto3.client('s3', aws_access_key_id=BaseConfig.AWS_ACCESS_KEY_ID,
                      aws_secret_access_key=BaseConfig.AWS_SECRET_ACCESS_KEY)
    s3.upload_fileobj(local_file, bucket_name, s3_file)
    file_url = '%s/%s/%s' % (s3.meta.endpoint_url, bucket_name, s3_file)
    return file_url

parser = reqparse.RequestParser()
parser.add_argument('image',
                    type=FileStorage,
                    required=True,
                    help='image is required',
                    location='files')

class Classifier(Resource):
    def post(self):
        data = parser.parse_args()
        image = data["image"]
        key_name = "some-key-name"
        upload_to_aws(image, BaseConfig.BUCKET_NAME, key_name)
        return {"message": "uploaded successfully"}, 200
The upload_fileobj() function will upload a file-like object to S3. You would pass it a file object returned from an open() command.
If the image variable contains a filename, you should be using upload_file() instead.
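To make the distinction concrete, here is a minimal sketch of both calls (the bucket and key names are placeholders, and image stands for the FileStorage from the code above). Note that if the stream has already been read elsewhere, it must be rewound first, or the uploaded object ends up 0 bytes:

import boto3

s3 = boto3.client('s3')

# Case 1: image is a file-like object (e.g. a werkzeug FileStorage).
# Rewind first -- a stream that was already consumed uploads as 0 bytes.
image.seek(0)
s3.upload_fileobj(image, 'my-bucket', 'some-key-name')

# Case 2: you only have a path on local disk.
s3.upload_file('/tmp/image.jpg', 'my-bucket', 'some-key-name')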

How to store an uploaded file in Heroku using Flask in Python?

I have made an app where an image can be uploaded. Everything works fine on Flask's local server, but after deploying my app on Heroku, uploaded images are not stored in the specified directory. Any kind of help would be appreciated.
from flask import Flask, redirect, request, url_for
from flask import render_template as ren
import os
from werkzeug.utils import secure_filename
import uuid

app = Flask(__name__)

# FILE_PATH = os.environ.get("FILE_PATH")
FILE_PATH = "templates/uploads/"

@app.route("/")
def home():
    return ren("index.html")

@app.route("/img-upload", methods=['GET', 'POST'])
def upload():
    if request.method == 'POST':
        if request.files:
            image = request.files['image']
            id = uuid.uuid1()
            if secure_filename(image.filename):
                filename = image.filename
                ext = filename.rsplit(".", 1)[1]
                filename = id.hex + "." + ext  # filename of the uploaded file
                file_path = os.path.join(str(FILE_PATH), secure_filename(filename))
                print(file_path)
                image.save(file_path)
                return redirect(request.url)
    return ren("index.html")

if __name__ == '__main__':
    app.run(debug=True)
Heroku has an ephemeral filesystem, meaning a file saved to disk only exists while the dyno is running and is deleted afterwards.
The option I would use is AWS S3, which is where I store images.
Here's a good link to get you started with AWS S3 and how to set it up and use it: https://www.youtube.com/watch?v=kt3ZtW9MXhw
After you have set up your AWS S3 bucket:
import os
import boto3

BUCKET = 'my-bucket-name'
s3 = boto3.client("s3",
                  aws_access_key_id=os.environ.get('AWS_ACCESS_KEY_ID'),
                  aws_secret_access_key=os.environ.get('AWS_SECRET_ACCESS_KEY'))

# uploading
s3.upload_file(Bucket=BUCKET, Filename=picture_fn, Key=picture_fn)

# retrieving
image_file = s3.generate_presigned_url('get_object',
                                       Params={
                                           'Bucket': BUCKET,
                                           'Key': picture_fn,
                                       },
                                       ExpiresIn=3600)

# deleting
s3.delete_object(Bucket=BUCKET, Key=picture_fn)
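Tying that into the upload route above might look like this (a sketch: image and filename are the variables from the upload() handler, and the local image.save() call is replaced by the S3 upload):

# inside upload(), instead of image.save(file_path):
s3.upload_fileobj(image, BUCKET, filename)

# later, hand the browser a temporary link instead of a local path:
url = s3.generate_presigned_url(
    'get_object',
    Params={'Bucket': BUCKET, 'Key': filename},
    ExpiresIn=3600,
)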

How to write a parquet file to ECS in Flask (Python) using boto or boto3

I have a Flask Python REST API that is called by another Flask REST API.
The input to my API is a parquet file (a FileStorage object) plus the ECS connection and bucket details.
I want to save the parquet file to ECS in a specific folder using boto or boto3.
The code I have tried:
def uploadFileToGivenBucket(self, inputData, file):
    BucketName = inputData.ecsbucketname
    calling_format = OrdinaryCallingFormat()
    client = S3Connection(inputData.access_key_id, inputData.secret_key, port=inputData.ecsport,
                          host=inputData.ecsEndpoint, debug=2,
                          calling_format=calling_format)
    # client.upload_file(BucketName, inputData.filename, inputData.folderpath)
    bucket = client.get_bucket(BucketName, validate=False)
    key = boto.s3.key.Key(bucket, inputData.filename)
    fileName = NamedTemporaryFile(delete=False, suffix=".parquet")
    file.save(fileName)
    with open(fileName.name) as f:
        key.send_file(f)
but it is not working, and it gives me an error like:
signature_host = '%s:%d' % (self.host, port)
TypeError: %d format: a number is required, not str
I tried Google but had no luck. Can anyone help me with this or share sample code for the same?
After a lot of trial and error (and time), I finally found the solution. I am posting it for everyone else who is facing the same issue.
You need to use boto3, and here is the code:
import logging
import boto3
from botocore.exceptions import ClientError
from tempfile import NamedTemporaryFile

def uploadFileToGivenBucket(self, inputData, file):
    BucketName = inputData.ecsbucketname
    f = NamedTemporaryFile(delete=False, suffix=".parquet")
    file.save(f)
    f.close()  # flush to disk before reading it back
    endpointurl = "<your endpoints>"
    s3_client = boto3.client('s3', endpoint_url=endpointurl,
                             aws_access_key_id=inputData.access_key_id,
                             aws_secret_access_key=inputData.secret_key)
    try:
        newkey = 'yourfolderpath/anotherfolder/' + inputData.filename
        response = s3_client.upload_file(f.name, BucketName, newkey)
    except ClientError as e:
        logging.error(e)
        return False
    return True
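As a side note, a werkzeug FileStorage is already file-like, so you can likely skip the temporary file entirely and stream it straight to the bucket. A sketch under that assumption:

# file is the incoming FileStorage; no NamedTemporaryFile needed
file.stream.seek(0)
s3_client.upload_fileobj(file.stream, BucketName, newkey)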

Download file from S3 bucket with Flask [duplicate]

Goal
Download a file from an S3 bucket to the user's computer.
Context
I am working on a Python/Flask API for a React app. When the user clicks the Download button on the Front-End, I want to download the appropriate file to their machine.
What I've tried
import boto3
s3 = boto3.resource('s3')
s3.Bucket('mybucket').download_file('hello.txt', '/tmp/hello.txt')
I am currently using some code that finds the path of the downloads folder and plugs that path into download_file() as the second parameter, along with the file on the bucket they are trying to download.
This worked locally and tests ran fine, but I ran into a problem once it was deployed: the code finds the downloads path of the SERVER and downloads the file there.
Question
What is the best way to approach this? I have researched and cannot find a good solution for downloading a file from the S3 bucket to the user's downloads folder. Any help/advice is greatly appreciated.
You should not need to save the file to the server. You can just download the file into memory, and then build a Response object containing the file.
from flask import Flask, Response
from boto3 import client

app = Flask(__name__)

def get_client():
    return client(
        's3',
        'us-east-1',
        aws_access_key_id='id',
        aws_secret_access_key='key'
    )

@app.route('/blah', methods=['GET'])
def index():
    s3 = get_client()
    file = s3.get_object(Bucket='blah-test1', Key='blah.txt')
    return Response(
        file['Body'].read(),
        mimetype='text/plain',
        headers={"Content-Disposition": "attachment;filename=test.txt"}
    )

app.run(debug=True, port=8800)
This is fine for small files; there won't be any meaningful wait time for the user. With larger files, though, this will affect UX: the file has to be completely downloaded to the server, then downloaded to the user. To fix this, use the Range keyword argument of the get_object method:
from flask import Flask, Response
from boto3 import client

app = Flask(__name__)

def get_client():
    return client(
        's3',
        'us-east-1',
        aws_access_key_id='id',
        aws_secret_access_key='key'
    )

def get_total_bytes(s3):
    result = s3.list_objects(Bucket='blah-test1')
    for item in result['Contents']:
        if item['Key'] == 'blah.txt':
            return item['Size']

def get_object(s3, total_bytes):
    if total_bytes > 1000000:
        return get_object_range(s3, total_bytes)
    return s3.get_object(Bucket='blah-test1', Key='blah.txt')['Body'].read()

def get_object_range(s3, total_bytes):
    offset = 0
    while total_bytes > 0:
        end = offset + 999999 if total_bytes > 1000000 else ""
        total_bytes -= 1000000
        byte_range = 'bytes={offset}-{end}'.format(offset=offset, end=end)
        offset = end + 1 if not isinstance(end, str) else None
        yield s3.get_object(Bucket='blah-test1', Key='blah.txt', Range=byte_range)['Body'].read()

@app.route('/blah', methods=['GET'])
def index():
    s3 = get_client()
    total_bytes = get_total_bytes(s3)
    return Response(
        get_object(s3, total_bytes),
        mimetype='text/plain',
        headers={"Content-Disposition": "attachment;filename=test.txt"}
    )

app.run(debug=True, port=8800)
This will download the file in 1MB chunks and send them to the user as they are downloaded. Both of these have been tested with a 40MB .txt file.
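As an aside, botocore's StreamingBody also exposes iter_chunks(), which yields the body in fixed-size pieces over a single GET and avoids the manual Range arithmetic. A minimal sketch:

def stream_object(s3, bucket, key, chunk_size=1000000):
    # one GET request; iter_chunks yields successive chunk_size pieces
    body = s3.get_object(Bucket=bucket, Key=key)['Body']
    for chunk in body.iter_chunks(chunk_size):
        yield chunk

Passing stream_object(s3, 'blah-test1', 'blah.txt') to Response() would plug in where get_object(s3, total_bytes) is used above.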
A better way to solve this problem is to create a presigned URL. This gives you a temporary URL that is valid for a limited time. It also removes your Flask server as a proxy between the user and the AWS S3 bucket, which reduces download time for the user.
def get_attachment_url():
    bucket = 'BUCKET_NAME'
    key = 'FILE_KEY'
    client = boto3.client(
        's3',
        aws_access_key_id=YOUR_AWS_ACCESS_KEY,
        aws_secret_access_key=YOUR_AWS_SECRET_KEY
    )
    return client.generate_presigned_url('get_object',
                                         Params={'Bucket': bucket, 'Key': key},
                                         ExpiresIn=60)
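A usage sketch from the Flask side, assuming the function above and an existing Flask app object: just redirect the browser to the temporary URL and let it download directly from S3:

from flask import redirect

@app.route('/download', methods=['GET'])
def download():
    return redirect(get_attachment_url())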
