Creating a tar stream in memory from multiple file byte streams - python

I'm trying to create a tar stream in memory, add files to it, and then save it to S3. But there is some issue: the files inside the tar have zero size. Can anyone please advise? Code snippet below:
def tar_and_upload(bucket, keys, dest_bucket):
    s3 = boto3.client('s3')
    file_obj = io.BytesIO()
    tar_file_obj = tarfile.open(mode="w:gz", fileobj=file_obj)
    response = {}
    for key in keys:
        obj = s3.get_object(Bucket=bucket, Key=key)
        _bytes = obj["Body"].read()
        _file_name = key.split("/")[-1]
        tar_file_obj.addfile(tarfile.TarInfo(_file_name), _bytes)
    tar_file_obj.close()
    try:
        obj_name = "{}.tar.gz".format(str(uuid.uuid4()))
        s3.put_object(Body=file_obj.getvalue(), Bucket=dest_bucket, Key=obj_name)
    except Exception as e:
        logging.error("Can't save tar to S3", exc_info=True)
        return

Okay, apparently when adding byte streams to a tar archive, we need to explicitly specify the size on the TarInfo.

For others looking to do the same with S3 objects:

def tar_and_upload(bucket, keys, dest_bucket):
    s3 = boto3.client('s3')
    file_obj = io.BytesIO()
    tar_file_obj = tarfile.open(mode="w:gz", fileobj=file_obj)
    for key in keys:
        obj = s3.get_object(Bucket=bucket, Key=key)
        _bytes = obj["Body"].read()
        _file_name = key.split("/")[-1]
        info = tarfile.TarInfo(_file_name)
        # The size must be set explicitly, otherwise the member is written with zero bytes.
        info.size = obj["ContentLength"]
        # LastModified is already part of the get_object response, so no extra head_object call is needed.
        info.mtime = obj["LastModified"].timestamp()
        tar_file_obj.addfile(info, io.BytesIO(_bytes))
    tar_file_obj.close()
    try:
        obj_name = "{}.tar.gz".format(str(uuid.uuid4()))
        s3.put_object(Body=file_obj.getvalue(), Bucket=dest_bucket, Key=obj_name)
    except Exception as e:
        logging.error("Can't save tar to S3", exc_info=True)
        return

Sample code for local files:
import tarfile
import uuid
import io
import os

def tar_and_upload():
    file_obj = io.BytesIO()
    tar_file_obj = tarfile.open(mode="w:gz", fileobj=file_obj)
    for filename in os.listdir("images"):
        print(filename)
        file_path = os.path.join("images", filename)
        #tar_file_obj.add(file_path)
        with open(file_path, "rb") as f:
            _bytes = f.read()
            tar_info = tarfile.TarInfo(filename)
            tar_info.size = len(_bytes)
            tar_file_obj.addfile(tar_info, io.BytesIO(_bytes))
    tar_file_obj.close()
    try:
        obj_name = "{}.tar.gz".format(str(uuid.uuid4()))
        object_path = os.path.join("temp", obj_name)
        with open(object_path, "wb") as f:
            f.write(file_obj.getvalue())
        print(obj_name)
    except Exception as e:
        print(str(e))

if __name__ == "__main__":
    tar_and_upload()
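A closely related tweak, sketched here as an alternative (the function name is mine): getvalue() copies the whole buffer, so for large archives it can help to rewind the BytesIO and stream it with upload_fileobj instead of put_object. A minimal sketch under those assumptions:

import io
import tarfile
import uuid
import boto3

def tar_and_upload_streaming(bucket, keys, dest_bucket):
    s3 = boto3.client('s3')
    file_obj = io.BytesIO()
    with tarfile.open(mode="w:gz", fileobj=file_obj) as tar:
        for key in keys:
            obj = s3.get_object(Bucket=bucket, Key=key)
            data = obj["Body"].read()
            info = tarfile.TarInfo(key.split("/")[-1])
            info.size = len(data)  # size is mandatory when addfile() is given a stream
            tar.addfile(info, io.BytesIO(data))
    # Rewind and hand the buffer to upload_fileobj, which streams it in chunks
    # (multipart for large payloads) instead of copying it out with getvalue().
    file_obj.seek(0)
    s3.upload_fileobj(file_obj, dest_bucket, "{}.tar.gz".format(uuid.uuid4()))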

How to read all the files from a directory in s3 bucket using Python in cloud functions

Here is my code. I am trying to read all the files of the same format from the S3 bucket.
Error : "Could not establish source connection [Errno 2] No such file or directory: '/user_code/s3:/"
def s3_file_read(self, source):
    bucket_name = 'xxx'
    region = 'xxx'
    object_name = 's3-folder-name/'
    ACCESS_KEY_ID = 'xxx'
    ACCESS_SECRET_KEY = 'xxx'
    s3_client = boto3.client('s3', aws_access_key_id=ACCESS_KEY_ID, aws_secret_access_key=ACCESS_SECRET_KEY, region_name=region)
    file_path = "s3://your-bucket-name/folder-name/"
    prefix = os.path.abspath(file_path)
    file_list = [os.path.join(prefix, f) for f in os.listdir(prefix) if f.endswith('.csv')]
    print('##################################Reading the file#############################')
    file_type = source['fileType'].lower()
    if source['fileType'] == 'csv':
        try:
            obj = s3_client.get_object(Bucket=bucket_name, Key=object_name)
            file_df = pd.read_csv(obj['Body'])
            print("CSV File read success")
        except Exception as e:
            print("Could not read the file {}".format(e))
    else:
        print("File format supported CSV")
Screenshot: https://i.stack.imgur.com/6pX8d.png
I've made some assumptions about what you'd like to do here, but this code will read the keys in a bucket and create a list of only the .csv objects. Then you can loop over that list and test whether a dataframe can be created. If you want to read all those files into one larger dataframe, the end of your function needs to be rewritten (a sketch for that follows the code below).
s3sr = boto3.resource('s3')

#there are other examples of collecting objects, this is just what I use
def get_keys_from_prefix(self, bucket, prefix):
    '''gets list of keys for given bucket and prefix'''
    keys_list = []
    paginator = s3sr.meta.client.get_paginator('list_objects_v2')
    # use Delimiter to limit search to that level of hierarchy
    for page in paginator.paginate(Bucket=bucket, Prefix=prefix, Delimiter='/'):
        # page.get('Contents') is None for an empty page, so default to an empty list
        keys = [content['Key'] for content in page.get('Contents', [])]
        print('keys in page: ', len(keys))
        keys_list.extend(keys)
    return keys_list

def s3_file_read(self, source):
    bucket_name = 'xxx'
    region = 'xxx'
    prefix = 's3-folder-name/'  # if no prefix, pass ''
    ACCESS_KEY_ID = 'xxx'
    ACCESS_SECRET_KEY = 'xxx'
    s3_client = boto3.client('s3', aws_access_key_id=ACCESS_KEY_ID, aws_secret_access_key=ACCESS_SECRET_KEY, region_name=region)
    keys_list = self.get_keys_from_prefix(bucket_name, prefix)
    csv_list = [f for f in keys_list if f.endswith('.csv')]
    for csvfile in csv_list:
        try:
            obj = s3_client.get_object(Bucket=bucket_name, Key=csvfile)
            file_df = pd.read_csv(obj['Body'])
            print("CSV File read success")
        except Exception as e:
            print("Could not read the file {}".format(e))

How to zip a folder in python with password?

With pyminizip I am able to zip a file with a password in Python:
filepath=r"C:\Users\xxx\Desktop\myFolder\file.txt"
import pyminizip
pyminizip.compress(filepath, None,"output.zip", "password", 0)
But how do I zip the whole folder 'myFolder' into a zip file with a password?
I tried removing the filename from the path, but it gives the error
OSError: error in opening C:\Users\xxx\Desktop\myFolder for reading
EDIT:
The link below has a function which will zip the directory, but it won't add a password.
https://www.calazan.com/how-to-zip-an-entire-directory-with-python/
If anyone can let me know whether it is possible to add a password to an existing zip file, that would solve my problem. Is that possible?
I was finally able to encrypt the whole directory (including the full subfolder structure and files) using the 'pyzipper' library suggested by Anupam Chaplot.
Here is the solution:
import os
import sys
import zipfile
import pyzipper

def zip_folderPyzipper(folder_path, output_path):
    """Zip the contents of an entire folder (with that folder included
    in the archive). Empty subfolders will be included in the archive
    as well.
    """
    parent_folder = os.path.dirname(folder_path)
    # Retrieve the paths of the folder contents.
    contents = os.walk(folder_path)
    try:
        zip_file = pyzipper.AESZipFile('new_test.zip', 'w', compression=pyzipper.ZIP_DEFLATED, encryption=pyzipper.WZ_AES)
        zip_file.pwd = b'PASSWORD'
        for root, folders, files in contents:
            # Include all subfolders, including empty ones.
            for folder_name in folders:
                absolute_path = os.path.join(root, folder_name)
                relative_path = absolute_path.replace(parent_folder + '\\', '')
                print("Adding '%s' to archive." % absolute_path)
                zip_file.write(absolute_path, relative_path)
            for file_name in files:
                absolute_path = os.path.join(root, file_name)
                relative_path = absolute_path.replace(parent_folder + '\\', '')
                print("Adding '%s' to archive." % absolute_path)
                zip_file.write(absolute_path, relative_path)
        print("'%s' created successfully." % output_path)
    except IOError as message:
        print(message)
        sys.exit(1)
    except OSError as message:
        print(message)
        sys.exit(1)
    except zipfile.BadZipfile as message:
        print(message)
        sys.exit(1)
    finally:
        zip_file.close()
Since I am new to Python I can't explain the code in detail. Here are the references:
https://pypi.org/project/pyzipper/
https://www.calazan.com/how-to-zip-an-entire-directory-with-python/
To extract the generated ZIP file in Windows:
Right click -> Unzip (Encrypted)
If you directly click the Extract All option, it will give an error.
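Extraction also works from Python with pyzipper itself. A minimal sketch, assuming the archive name and password hard-coded in the function above (the output directory name is illustrative):

import pyzipper

# Open the AES-encrypted archive and extract everything using the password.
with pyzipper.AESZipFile('new_test.zip') as zf:
    zf.setpassword(b'PASSWORD')
    zf.extractall('extracted')  # hypothetical output directory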
Try this:
First check the pyminizip package page, then try it.
import pyminizip as pyzip
compression = 8
# pyminizip.compress(src_file_path, prefix_inside_zip, dst_file_path, password, compress_level)
pyzip.compress("test.txt", None, "test.zip", "Pswrd", compression)
Here is how to copy a whole directory (with its subdirectories and files), then compress and encrypt the resulting zip with a password, without needing a separate key file; in this example, decryption is authorized by MAC address. It's up to you to change or improve the script, but the essentials work very well.
After a lot of research, testing and thinking, I came up with this solution.
My setup:
Python 3.8 64-bit on Windows 7 64-bit
Usage:
First, we need the cryptography module (supported platforms and installation details: https://cryptography.io/en/latest/installation/).
command:
pip install cryptography
Then we use the Fernet object from this module
https://cryptography.io/en/latest/fernet/
with a password-derived key
https://cryptography.io/en/latest/fernet/#using-passwords-with-fernet
and shutil:
https://docs.python.org/3/library/shutil.html
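The key-derivation idea used in the script below, shown in isolation: a minimal sketch following the Fernet-with-passwords pattern from the docs linked above, with a hypothetical password in place of the MAC address the script uses.

import base64
import os
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

password = b"my-secret"   # hypothetical password
salt = os.urandom(16)     # keep the salt so the same key can be re-derived later

kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=480000)
key = base64.urlsafe_b64encode(kdf.derive(password))

# Fernet encrypts/decrypts with the derived key.
token = Fernet(key).encrypt(b"some data")
print(Fernet(key).decrypt(token))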
file second.py:
import os
import re, uuid
import string
import shutil
import zlib
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
import base64
import zipfile
class zipy:
    def __init__(self, pathDir=None):
        """If pathDir is None, the script works on the directory it is executed from."""
        if pathDir != None:
            if os.path.isdir(pathDir):
                pathDir = pathDir.replace(os.sep, '/')
                if pathDir.endswith('/'):
                    self.root = pathDir
                else:
                    self.root = pathDir + '/'
            else:
                self.root = os.getcwd()+os.sep
                self.root = self.root.replace(os.sep, '/')
        else:
            self.root = os.getcwd()+os.sep
            self.root = self.root.replace(os.sep, '/')

        os.chdir(self.root)
        self.name = 'sauvegarde'
        self.dirSauvegarde = self.root+self.name
        self.dirSauvegarde = self.dirSauvegarde.replace(os.sep, '/')

        lectureDossier = os.listdir(self.root)
        print(lectureDossier)
        self.path_system = {}
        for element in lectureDossier:
            if os.path.isdir(element):
                if element != '__pycache__':
                    self.path_system[element] = self.root + element + os.sep.replace(os.sep, '/')
                    self.path_system[element] = self.path_system[element].replace(os.sep, '/')
                else:
                    pass
            elif os.path.isfile(element):
                self.path_system[element] = self.root + element
                self.path_system[element] = self.path_system[element].replace(os.sep, '/')
            else:
                pass

        self.zipi = myZip(self.dirSauvegarde)

    def save(self):
        """Copy everything into the backup folder, then zip and encrypt it."""
        self.createDir(self.dirSauvegarde)
        chemin_src = ""
        chemin_dist = ""
        for element in self.path_system:
            if element != self.dirSauvegarde:
                chemin_src = self.root+element
                chemin_dest = self.dirSauvegarde + os.sep + element
                chemin_dest = chemin_dest.replace(os.sep, '/')
                if os.path.isdir(chemin_src):
                    self.copyDir(chemin_src, chemin_dest)
                else:
                    self.copyFile(chemin_src, chemin_dest)
        self.zipi.zip(zip_exist=True)
        self.delDir(self.dirSauvegarde)

    def copyDir(self, src, dest):
        try:
            shutil.copytree(src, dest, dirs_exist_ok=True)
        except:
            pass

    def copyFile(self, src, dest):
        try:
            shutil.copyfile(src, dest)
        except:
            pass

    def createDir(self, dirPath):
        if os.path.isdir(dirPath):
            self.delDir(dirPath)
        else:
            pass
        os.makedirs(dirPath, exist_ok=True)

    def delDir(self, dir):
        if os.path.isdir(dir):
            if len(os.listdir(dir)) > 0:
                try:
                    print('rmtree')
                    shutil.rmtree(dir, ignore_errors=True)
                except:
                    pass
            else:
                try:
                    os.rmdir(dir)
                except:
                    pass

    def decrypt(self):
        self.zipi.unzip()
class myZip:
    def __init__(self, dir):
        self.pathDir = dir
        self.nom = os.path.basename(dir)
        self.pathZip = self.pathDir + '.zip'
        self.crypt = Encryptor()

    def zip(self, zip_exist=False):
        if zip_exist == False:
            pass
        else:
            if os.path.isfile(self.pathZip):
                try:
                    os.remove(self.pathZip)
                except:
                    pass
        shutil.make_archive(os.path.splitext(self.pathZip)[0], 'zip', self.pathDir)
        key = self.crypt.key_create()
        #TEST
        self.crypt.file_encrypt(key, self.pathZip, self.pathZip)
        self.crypt.key_write(self.pathZip, key)

    def unzip(self):
        #TEST
        if self.crypt.checkPass(self.pathZip):
            #print('ok, MAC address authorized')
            key = self.crypt.key_load(self.pathZip)
            self.crypt.file_decrypt(key, self.pathZip, self.pathZip)
        else:
            print('MAC address not authorized')
class Encryptor:
    def __init__(self):
        self.salto = None

    def key_create(self):
        password = self.getMac()
        password = bytes(password, encoding="utf-8")
        self.salto = os.urandom(16)
        print(self.salto)
        kdf = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=self.salto,
            iterations=100,
        )
        key = base64.urlsafe_b64encode(kdf.derive(password))
        return key

    def key_write(self, pathZip, key):
        with zipfile.ZipFile(pathZip, 'a') as zip:
            zip.comment = key + bytes(' byMe ', encoding="utf-8") + self.salto

    def key_load(self, pathZip):
        stri = []
        with zipfile.ZipFile(pathZip, 'a') as zip:
            stri = zip.comment.split(b' byMe ')
        print(stri[0])
        print(stri[1])
        key = stri[0]
        self.salto = stri[1]
        return key

    def checkPass(self, pathZip):
        key = base64.urlsafe_b64decode(self.key_load(pathZip))
        salt = self.salto
        mdp = self.getMac()
        mdp = bytes(mdp, encoding="utf-8")
        kdf = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=salt,
            iterations=100,
        )
        retour = False
        try:
            kdf.verify(mdp, key)
            retour = True
        except:
            retour = False
        return retour

    def file_encrypt(self, key, original_file, encrypted_file):
        f = Fernet(key)
        with open(original_file, 'rb') as file:
            original = file.read()
        encrypted = f.encrypt(original)
        with open(encrypted_file, 'wb') as file:
            file.write(encrypted)

    def file_decrypt(self, key, encrypted_file, decrypted_file):
        f = Fernet(key)
        with open(encrypted_file, 'rb') as file:
            encrypted = file.read()
        decrypted = f.decrypt(encrypted)
        with open(decrypted_file, 'wb') as file:
            file.write(decrypted)

    def getMac(self):
        return "".join(re.findall('..', '%012x' % uuid.getnode()))
Use like this:
file: main.py
from second import zipy

# If the argument is empty, the script backs up the directory it is executed from;
# otherwise it works on, and outputs the zip in, the path given as argument.
dd = zipy("E:/path")
# or dd = zipy("E:/path/"), or dd = zipy(); if you pass an argument, pass an absolute path

# Save the zip and encrypt it. Change second.py to pass it a password directly as an argument.
dd.save()

# Decrypt the zip
dd.decrypt()
Here's a snippet with pyminizip: it gets the list of files recursively and zips the whole folder.
import pyminizip
import os

def get_paths_recursively(src_root_path):
    files = []
    if src_root_path is not None:
        for root, directories, filenames in os.walk(src_root_path):
            for filename in filenames:
                full_file_name = os.path.join(root, filename)
                if os.path.isfile(full_file_name) and not filename.startswith('.'):
                    files.append(os.path.join(root, filename))
    return files

def pyminizip_zipper(folder_path, output_path, password):
    paths = get_paths_recursively(folder_path)
    roots = []
    for path in paths:
        roots.append(os.path.dirname(path.replace(os.path.dirname(folder_path), './')))
    pyminizip.compress_multiple(paths, roots, output_path, password, 5)
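A hypothetical call, using the folder from the question:

# Zips C:\Users\xxx\Desktop\myFolder into myFolder.zip with the given password,
# keeping paths relative to the folder's parent (compression level 5).
pyminizip_zipper(r"C:\Users\xxx\Desktop\myFolder", "myFolder.zip", "password")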

s3 upload from base64 using Lambda

I have the following code:
import base64
imgdata = base64.b64decode(new_string)
filename = 'image.jpg'  # I assume you have a way of picking unique filenames
with open(filename, 'wb') as f:
    f.write(imgdata)
It saves a file as .jpg and I can open it.
How can I upload this to an S3 bucket, or any other service, and return a URL? Security is not an issue.
I tried
try:
    convertedFileString = fstring.replace('-', '+').replace('_', '/').replace(',', '=')
    imgdata = base64.b64decode(new_string)
    # I assume you have a way of picking unique filenames
    with open(filename, 'wb') as f:
        s3 = boto3.resource('s3')
        bucket = s3.Bucket('ag-grid')
        bucket.put_object(Key=filename, Body=f)
except Exception as e:
    return {
        'statusCode': 500,
        'body': str(e)
    }
So, I'm not sure if I understand your problem.
I use the code below to upload and it's working.
# Filename
new_name = '{}_{}_{}_{}_{}_{}x{}.{}'.format(cid, uid, id_service_order, id_question, uuid.uuid4(), 0, 0,
                                            fileExtension)  # type: str
key = "{}".format(new_name)

# Let's use Amazon S3
s3 = boto3.client("s3",
                  aws_access_key_id=aws_config.aws_access_key_id,
                  aws_secret_access_key=aws_config.aws_secret_access_key,
                  region_name=aws_config.aws_s3_region,
                  config=Config(signature_version='s3v4'))

dec = base64.b64decode(img_base64)
rs = s3.put_object(
    Bucket=aws_config.aws_s3_bucket,
    Key=key,
    ContentType=fileType,
    Body=dec,
    ACL='public-read'
)
print(rs)
print(new_name)
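Since the question also asks for a URL back: with ACL='public-read' the object URL can be built from the bucket, region, and key, or a time-limited link can be generated instead. A rough sketch, reusing the s3 client, aws_config values, and key from the code above:

# Public-style URL (works because the object was uploaded with ACL='public-read').
public_url = "https://{}.s3.{}.amazonaws.com/{}".format(
    aws_config.aws_s3_bucket, aws_config.aws_s3_region, key)

# Or a pre-signed URL that expires after an hour, with no public ACL required.
presigned_url = s3.generate_presigned_url(
    'get_object',
    Params={'Bucket': aws_config.aws_s3_bucket, 'Key': key},
    ExpiresIn=3600)

print(public_url)
print(presigned_url)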
Does that help you?

Progress bar while uploading a file to dropbox

import dropbox
client = dropbox.client.DropboxClient('<token>')
f = open('/ssd-scratch/abhishekb/try/1.mat', 'rb')
response = client.put_file('/data/1.mat', f)
I want to upload a big file to dropbox. How can I check the progress? [Docs]
EDIT:
The uploader offset stays the same somehow (see below). What am I doing wrong?
import os, pdb, dropbox

size = 1194304
client = dropbox.client.DropboxClient(token)
path = 'D:/bci_code/datasets/1.mat'

tot_size = os.path.getsize(path)
bigFile = open(path, 'rb')
uploader = client.get_chunked_uploader(bigFile, size)
print "uploading: ", tot_size
while uploader.offset < tot_size:
    try:
        upload = uploader.upload_chunked()
        print uploader.offset
    except rest.ErrorResponse, e:
        print("something went wrong")
EDIT 2:
size = 1194304
tot_size = os.path.getsize(path)
bigFile = open(path, 'rb')
uploader = client.get_chunked_uploader(bigFile, tot_size)
print "uploading: ", tot_size
while uploader.offset < tot_size:
    try:
        upload = uploader.upload_chunked(chunk_size=size)
        print uploader.offset
    except rest.ErrorResponse, e:
        print("something went wrong")
upload_chunked, as the documentation notes:
Uploads data from this ChunkedUploader's file_obj in chunks, until an
error occurs. Throws an exception when an error occurs, and can be
called again to resume the upload.
So yes, it uploads the entire file (unless an error occurs) before returning.
If you want to upload a chunk at a time on your own, you should use upload_chunk and commit_chunked_upload.
Here's some working code that shows you how to upload a single chunk at a time and print progress in between chunks:
from io import BytesIO
import os
from dropbox.client import DropboxClient

client = DropboxClient(ACCESS_TOKEN)

path = 'test.data'
chunk_size = 1024*1024  # 1MB

total_size = os.path.getsize(path)
upload_id = None
offset = 0
with open(path, 'rb') as f:
    while offset < total_size:
        offset, upload_id = client.upload_chunk(
            BytesIO(f.read(chunk_size)),
            offset=offset, upload_id=upload_id)
        print('Uploaded so far: {} bytes'.format(offset))

# Note the "auto/" on the next line, which is needed because
# this method doesn't attach the root by itself.
client.commit_chunked_upload('auto/test.data', upload_id)
print('Upload complete.')
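Note that DropboxClient belongs to the long-deprecated v1 SDK. With the current dropbox package the same chunk-by-chunk pattern uses upload sessions; a rough sketch, assuming the same ACCESS_TOKEN, path, and chunk_size as above and a file larger than one chunk:

import os
import dropbox

dbx = dropbox.Dropbox(ACCESS_TOKEN)
total_size = os.path.getsize(path)

with open(path, 'rb') as f:
    # Start the session with the first chunk, then append until the last one.
    session = dbx.files_upload_session_start(f.read(chunk_size))
    cursor = dropbox.files.UploadSessionCursor(session_id=session.session_id,
                                               offset=f.tell())
    commit = dropbox.files.CommitInfo(path='/test.data')
    while f.tell() < total_size:
        if total_size - f.tell() <= chunk_size:
            # Last chunk: finish the session and commit the file.
            dbx.files_upload_session_finish(f.read(chunk_size), cursor, commit)
        else:
            dbx.files_upload_session_append_v2(f.read(chunk_size), cursor)
            cursor.offset = f.tell()
        print('Uploaded so far: {} bytes'.format(f.tell()))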

Use AWS lambda function to convert S3 file from zip to gzip using boto3 python

I need to convert a .zip file from S3 to a .gzip file using boto3 python in an AWS lambda function. Any suggestions on how to do this?
Here is what I have so far:
import json
import boto3
import zipfile
import gzip

s3 = boto3.resource('s3')

def lambda_handler(event, context):
    bucket = event['Records'][0]['s3']['bucket']['name']
    key = event['Records'][0]['s3']['object']['key']
    try:
        s3Obj = s3.Object(bucket_name=bucket, key=key)
        response = s3Obj.get()
        data = response['Body'].read()
        zipToGzip = gzip.open(data, 'wb')
        zipToGzip.write(s3.upload_file(bucket, (s3 + '.gz')))
        zipToGzip.close()
    except Exception as e:
        print(e)
        print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))
        raise e
OK, got it figured out. Thanks for your input Lee.
import json
import boto3
import zipfile
import gzip

print('Loading function')
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')

def lambda_handler(event, context):
    # Get the object from the event and show its content type
    bucket = event['Records'][0]['s3']['bucket']['name']
    key = event['Records'][0]['s3']['object']['key']
    try:
        s3_client.download_file(bucket, key, '/tmp/file.zip')
        zfile = zipfile.ZipFile('/tmp/file.zip')
        namelist = zfile.namelist()

        if len(namelist) > 1:
            pass
            #alertme()

        for filename in namelist:
            data = zfile.read(filename)
            f = open('/tmp/' + str(filename), 'wb')
            f.write(data)
            f.close()

        zipToGzip = gzip.open('/tmp/data.gz', 'wb')
        zipToGzip.write(data)
        zipToGzip.close()

        s3_client.upload_file('/tmp/data.gz', bucket, key + '.gz')
        s3_client.delete_object(Bucket=bucket, Key=key)
    except Exception as e:
        print(e)
        print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))
        raise e
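A variant that skips /tmp entirely and recompresses in memory; a sketch that, like the code above, assumes the zip contains a single file (the function name here is illustrative):

import io
import gzip
import zipfile
import boto3

s3_client = boto3.client('s3')

def zip_to_gzip(bucket, key):
    # Read the zip from S3 into memory and take its first member.
    zip_bytes = s3_client.get_object(Bucket=bucket, Key=key)['Body'].read()
    with zipfile.ZipFile(io.BytesIO(zip_bytes)) as zf:
        data = zf.read(zf.namelist()[0])

    # Gzip the extracted bytes into another in-memory buffer.
    out = io.BytesIO()
    with gzip.GzipFile(fileobj=out, mode='wb') as gz:
        gz.write(data)
    out.seek(0)

    # Upload the .gz next to the original key.
    s3_client.upload_fileobj(out, bucket, key + '.gz')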
