I have been having some issues with my AWS Lambda that unzips a file inside our S3 bucket. I created a script that is triggered by a JSON payload that gets passed through to it. The problem is that it seems to be losing the parent folder of the zip file and uploading only the child folders underneath it. This is an issue for me because we also have another script I made that parses a log4j file inside a folder to review it for errors. That script is having problems because of the lost name that identifies which farm the folder comes from.
To give an example of the issue ---
There's an S3 bucket in us-east, and inside that bucket is a key for "OriginalFolder.zip". When this Lambda is triggered it unzips the archive and places the child file in the exact same bucket and location as the original zip file, but names it "Log.folder". I want it to keep the original name of the zip file so that when multiple farms trigger this Lambda it doesn't overwrite the folder that gets created, or get confused about which one the second Lambda should read from.
I tried to append something to the end of the created file name so that params could be passed through for each farm that runs it, but I can't seem to make it work. I also considered calling a separate action in the script to copy and rename the object using boto3, but I would rather not use that as my first choice. I feel there has to be an easier method that I might be overlooking.
Any thoughts would be helpful.
Edit: Here's a picture of the example. The green arrow is what I want it to stay named as. The red arrow is what the file is being named inside our S3 environment. "on1" is the next folder inside "update-dc-logs-test".
import os
import tempfile
import zipfile
from concurrent import futures
from io import BytesIO

import boto3

s3 = boto3.client('s3')

def handler(event, context):
    # Parse and prepare required items from event
    global bucket, path, zipdata, rn_file
    action = event.get("action", None)
    if action == "create" or action == "update":
        bucket = event['payload']['BucketName']
        key = event['payload']['Key']
        #rn_file = event['payload']['RenameFile']
        path = os.path.dirname(key)

        # Create temporary file
        temp_file = tempfile.mktemp()

        # Fetch and load target file
        s3.download_file(bucket, key, temp_file)
        zipdata = zipfile.ZipFile(temp_file)

        # Call action method using ThreadPool
        with futures.ThreadPoolExecutor(max_workers=4) as executor:
            future_list = [
                executor.submit(extract, filename)
                for filename in zipdata.namelist()
            ]

        result = {'success': [], 'fail': []}
        for future in future_list:
            filename, status = future.result()
            result[status].append(filename)

        return result

def extract(filename):
    # Extract zip and place it back in bucket
    upload_status = 'success'
    try:
        s3.upload_fileobj(
            BytesIO(zipdata.read(filename)),
            bucket,
            os.path.join(path, filename)
        )
    except Exception:
        upload_status = 'fail'
    finally:
        return filename, upload_status
You are prefixing all uploaded files with path, which is the folder prefix under which the ZIP file is found. If you want the uploaded files to be stored below a prefix made up of the path and the name of the ZIP file (minus the .zip extension), then change the value of path to this:
path = os.path.splitext(key)[0]
Now, instead of path holding the ZIP file's folder prefix it will contain the folder prefix plus the first part of the ZIP filename. For example, if an object is uploaded to folder1/myarchive.zip then path would previously contain folder1, but with this change it will now contain folder1/myarchive.
When that new path is combined in the extract function via os.path.join(path, filename), the object will now be uploaded to folder1/myarchive/on1/file.txt.
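To illustrate with a minimal sketch (the key and the archive member name below are hypothetical, reusing the folder names from your example):

import os

key = 'update-dc-logs-test/OriginalFolder.zip'   # hypothetical key for illustration

# Before: the prefix is only the ZIP file's parent folder
path = os.path.dirname(key)       # 'update-dc-logs-test'

# After: the prefix also keeps the ZIP file's own name (minus .zip)
path = os.path.splitext(key)[0]   # 'update-dc-logs-test/OriginalFolder'

# Inside extract(), the destination key then becomes (on the Lambda's POSIX filesystem):
print(os.path.join(path, 'on1/file.txt'))
# -> 'update-dc-logs-test/OriginalFolder/on1/file.txt'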
Hi guys, I need to generate a zip file from a bunch of images and add it as a file to the database.
This is the code that generates the file:
import os
import shutil
import urllib.request

def create_zip_folder(asin):
    print('creating zip folder for asin: ' + asin)
    asin_object = Asin.objects.get(asin=asin)

    # create folder
    output_dir = f"/tmp/{asin}"
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # download images
    for img in asin_object.asin_images.all():
        urllib.request.urlretrieve(img.url, f"{output_dir}/{img.id}.jpg")

    # zip all files in output_dir
    zip_file = shutil.make_archive(asin, 'zip', output_dir)
    asin_object.zip_file = zip_file
    asin_object.has_zip = True
    asin_object.save()

    # delete folder
    shutil.rmtree(output_dir)
    return True
This all works and I can see the files generated in my editor, but when I try to access it in the template via asin.zip_file.url I get this error:
SuspiciousOperation at /history/
Attempted access to '/workspace/B08MFR2DRS.zip' denied.
Why is this happening? I thought the file would be uploaded to the file storage through the model, but apparently it ends up in a restricted folder. This happens both in development (with local file storage) and in production (with an S3 bucket as file storage).
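For what it's worth, here is a minimal sketch of one way to route the archive through Django's storage backend instead of assigning a raw filesystem path to the field. It reuses asin, asin_object and output_dir from the snippet above and assumes zip_file is a FileField; it is an illustration, not a confirmed fix for the SuspiciousOperation error:

import shutil
from django.core.files import File

# build the archive inside /tmp instead of the process working directory
zip_path = shutil.make_archive(f"/tmp/{asin}", 'zip', output_dir)

# hand the file to the FileField so the configured storage (local or S3) writes it
with open(zip_path, 'rb') as f:
    asin_object.zip_file.save(f"{asin}.zip", File(f), save=True)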
I have a GCS bucket called my-gcs with inconsistent subfolders such as:
parent-path/path1/path2/*
parent-path/path3/path4/path5/*
parent-path/path6/*
The files can be parquet/csv or other formats.
This is my function to copy an entire folder from local to GCS:
def upload_local_directory_to_gcs(src_path, dest_path, data_backup, file_name):
    """
    Upload the whole directory to GCS
    """
    logger.debug("Uploading directory...")
    storage_client = storage.Client.from_service_account_json(key_path)
    bucket = storage_client.get_bucket(GCS_BUCKET)

    if os.path.isfile(src_path):
        blob = bucket.blob(os.path.join(dest_path, os.path.basename(src_path)))
        blob.upload_from_filename(src_path)
        return

    for item in glob.glob(src_path + '/*'):
        file_exist = check_file_exist(data_backup, file_name)
        if os.path.isfile(item):
            print(item)
            if file_exist is False:
                blob = bucket.blob(os.path.join(dest_path, os.path.basename(item)),
                                   chunk_size=10485760)
                blob.upload_from_filename(item)
            else:
                logger.warning("Skipping upload. File already existed")
        else:
            if file_exist is False:
                upload_local_directory_to_gcs(item, os.path.join(dest_path, os.path.basename(item)),
                                              data_backup, file_name)
            else:
                logger.warning("Skipping upload. File already existed")
This is the function that checks whether a specific file exists in the directory and its sub-directories:
def check_file_exist(dataset, file_name):
    """
    Check if files existed
    """
    storage_client = storage.Client.from_service_account_json(key_path)
    bucket = storage_client.bucket(GCS_BUCKET)
    logger.debug("Checking if file already existed in GCS to skip upload...")

    blobs = bucket.list_blobs(prefix=f'parent-path{dataset}/')
    check_files = [blob.name for blob in blobs if file_name in blob.name]  # if '.' in blob.name
    return bool(len(check_files))
However, the code is not behaving correctly. Say the path parent-path/path1/path2/* already has a file called first_file.csv: it skips uploading the existing file in that path. But as soon as it encounters a file that does not exist yet, it uploads that file and then overwrites the other files across all directories as well.
I was expecting it to upload only the specific files that do not exist yet, without overwriting the others.
I tried my best to explain... please help.
If you have a look at the documentation, you can see this about the name property of a blob:
The name of the blob. This corresponds to the unique path of the object in the bucket.
That means the value is not only the file name but the fully qualified path plus the name, e.g. path/to/file.csv.
In your loop, you check whether a file name (file.csv for example) is included in the blob path. Consider this case:
path/to/file.csv
path/to/to/file.csv
If you test whether file.csv exists, both blobs will return true.
To fix your issue, you need to
either compare the target_path + file_name against blob.name for strict equality,
or include an additional condition in your "if" that checks the bucket path in addition to the file name. A minimal sketch of the first option follows below.
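Here is a sketch of that strict-equality check, reusing the names from the snippets above; the extra target_path parameter is an assumption about how the caller knows the destination folder:

def check_file_exist(dataset, file_name, target_path):
    """Return True only if the exact object <target_path>/<file_name> exists."""
    storage_client = storage.Client.from_service_account_json(key_path)
    bucket = storage_client.bucket(GCS_BUCKET)

    expected_name = f"{target_path}/{file_name}"   # e.g. parent-path/path1/path2/first_file.csv
    blobs = bucket.list_blobs(prefix=f'parent-path{dataset}/')
    return any(blob.name == expected_name for blob in blobs)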
Is it possible to perform a "deep" copy of Google Drive files, so that the copied file doesn't point to the same file object as the original? I'd like to be able to copy a file and have the copy be completely independent of the original, such that any modifications that are made to the copy don't also show up in the original. Using the following code I'm able to:
Create a folder in Google Drive
Copy a file into the new folder
But the problem is that any changes that are made to the copy also show up in the original. I'd like for the copied file to be a completely independent file. Is this possible?
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
gauth = GoogleAuth()
#load previously generated credentials file
gauth.LoadCredentialsFile("mycreds3.txt")
drive = GoogleDrive(gauth)
#define ID of file to be copied
template_file_id = "1RQWYeeth-Ph ..."
#create a new folder to store the copied file
folder = drive.CreateFile({"title":"test_folder", 'mimeType': 'application/vnd.google-apps.folder'})
folder.Upload()
folder_id = folder['id']
#copy file into newly created folder
drive.auth.service.files().copy(fileId=template_file_id,body={'parents':[{"kind":'drive#file',"id":folder_id}], 'title':'new_file_title'}).execute()
EDIT:
I was able to perform a deep copy by copying a shared file. When a file is copied from a shared file (which doesn't have a shortcut in Drive that links to the original), a deep copy is created such that modifications to the copied file don't show up in the original. Copying shared folders this way threw an error, but individual files worked just fine.
destination_folder_id = 'YTRCA18EE ...'
shared_files = drive.ListFile({'q':'sharedWithMe'}).GetList()
for file in shared_files:
    drive.auth.service.files().copy(fileId=file['id'], body={'parents':[{"kind":'drive#file',"id":destination_folder_id}], 'title':file['title']}).execute()
Let's take this step by step.
The way this library works is that all calls must go through a service. In this case a Drive service will give your application access to all of the methods available in the Google Drive API.
drive_service = GoogleDrive(gauth)
You have named your variable drive when creating it; for consistency I will refer to it as drive_service.
Creating a new file and uploading it to Google Drive is a two-part process. The first part is the file_metadata, that being the name and description of the file. The second is the media, or the actual file data itself.
from googleapiclient.http import MediaFileUpload

file_metadata = {'name': 'photo.jpg'}
media = MediaFileUpload('files/photo.jpg', mimetype='image/jpeg')
file = drive_service.files().create(body=file_metadata,
                                    media_body=media,
                                    fields='id').execute()
print('File ID: %s' % file.get('id'))
Note: all the fields parameter does is limit the response returned by the API to only the file id.
I have successfully uploaded a single text file to Google Cloud Storage. But when I try to upload a whole folder, it gives a permission denied error.
filename = "d:/foldername" #here test1 is the folder.
Error:
Traceback (most recent call last):
  File "test1.py", line 142, in <module>
    upload()
  File "test1.py", line 106, in upload
    media = MediaFileUpload(filename, chunksize=CHUNKSIZE, resumable=True)
  File "D:\jatin\Project\GAE_django\GCS_test\oauth2client\util.py", line 132, in positional_wrapper
    return wrapped(*args, **kwargs)
  File "D:\jatin\Project\GAE_django\GCS_test\apiclient\http.py", line 422, in __init__
    fd = open(self._filename, 'rb')
IOError: [Errno 13] Permission denied: 'd:/foldername'
This works for me. It copies all content from a local directory to a specific bucket-name/full-path (recursively) in Google Cloud Storage:

import glob
import os
from google.cloud import storage

def upload_local_directory_to_gcs(local_path, bucket, gcs_path):
    assert os.path.isdir(local_path)
    for local_file in glob.glob(local_path + '/**'):
        if not os.path.isfile(local_file):
            upload_local_directory_to_gcs(local_file, bucket, gcs_path + "/" + os.path.basename(local_file))
        else:
            remote_path = os.path.join(gcs_path, local_file[1 + len(local_path):])
            blob = bucket.blob(remote_path)
            blob.upload_from_filename(local_file)

upload_local_directory_to_gcs(local_path, bucket, BUCKET_FOLDER_DIR)
A version without a recursive function, and it works with 'top level files' (unlike the top answer):
import glob
import os
from google.cloud import storage
GCS_CLIENT = storage.Client()
def upload_from_directory(directory_path: str, dest_bucket_name: str, dest_blob_name: str):
    rel_paths = glob.glob(directory_path + '/**', recursive=True)
    bucket = GCS_CLIENT.get_bucket(dest_bucket_name)
    for local_file in rel_paths:
        remote_path = f'{dest_blob_name}/{"/".join(local_file.split(os.sep)[1:])}'
        if os.path.isfile(local_file):
            blob = bucket.blob(remote_path)
            blob.upload_from_filename(local_file)
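As a usage example (the bucket and directory names here are placeholders, not from the original post):

# uploads ./data/sub/file.txt as gs://my-bucket/backups/sub/file.txt
# (the top-level 'data' segment is dropped by the split above)
upload_from_directory('data', 'my-bucket', 'backups')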
A folder is a cataloging structure containing references to files and directories. The library will not accept a folder as an argument.
As far as I understand, your use case is to upload to GCS while preserving the local folder structure. To accomplish that you can use the os python module and write a recursive function (e.g. process_folder) that takes a path as an argument. The logic for the function can be:
Use the os.listdir() method to get a list of objects within the source path (it returns both files and folders).
Iterate over the list from step 1 to separate files from folders via the os.path.isdir() method.
Iterate over the files and upload them with an adjusted path (e.g. path + "/" + file_name).
Iterate over the folders, making a recursive call (e.g. process_folder(path + folder_name)).
It'll be necessary to work with two paths:
The real filesystem path (e.g. "/Users/User/…/upload_folder/folder_name") used with the os module.
The virtual path for GCS file uploads (e.g. "upload" + "/" + folder_name + "/" + file_name). A sketch of this approach follows below.
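A minimal sketch of that recursion, assuming a google-cloud-storage bucket object and placeholder paths (none of this code is from the original answer):

import os
from google.cloud import storage

def process_folder(bucket, real_path, gcs_path):
    """Recursively upload everything under real_path to gcs_path in the bucket."""
    for name in os.listdir(real_path):
        real_item = os.path.join(real_path, name)      # real system path
        gcs_item = gcs_path + "/" + name               # virtual GCS path
        if os.path.isdir(real_item):
            process_folder(bucket, real_item, gcs_item)   # recurse into sub-folder
        else:
            bucket.blob(gcs_item).upload_from_filename(real_item)

# placeholder bucket and paths for illustration
client = storage.Client()
bucket = client.get_bucket("my-bucket")
process_folder(bucket, "/Users/User/upload_folder", "upload")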
Don’t forget to implement exponential backoff referenced at [1] to deal with 500 errors. You can use a Drive SDK example at [2] as a reference.
[1] - https://developers.google.com/storage/docs/json_api/v1/how-tos/upload#exp-backoff
[2] - https://developers.google.com/drive/web/handle-errors
I assume the bare filename = "D:\foldername" is not enough information about the source code. I'm also not sure this is even possible: via the web interface you can likewise only upload files or create folders into which you then upload the files.
You could save the folder's name, then create it (I've never used Google App Engine, but I guess that should be possible) and then upload the contents to the new folder.
Refer to: https://hackersandslackers.com/manage-files-in-google-cloud-storage-with-python/
from os import listdir
from os.path import isfile, join
...
def upload_files(bucketName):
    """Upload files to GCP bucket."""
    files = [f for f in listdir(localFolder) if isfile(join(localFolder, f))]
    for file in files:
        localFile = localFolder + file
        blob = bucket.blob(bucketFolder + file)
        blob.upload_from_filename(localFile)
    return f'Uploaded {files} to "{bucketName}" bucket.'
This solution can also be used on Windows systems. Simply provide the folder name to upload and the destination bucket name. Additionally, it can handle any level of subdirectories in a folder.
import os
from google.cloud import storage
storage_client = storage.Client()
def upload_files(bucketName, folderName):
    """Upload files to GCP bucket."""
    bucket = storage_client.get_bucket(bucketName)
    for path, subdirs, files in os.walk(folderName):
        for name in files:
            path_local = os.path.join(path, name)
            blob_path = path_local.replace('\\', '/')
            blob = bucket.blob(blob_path)
            blob.upload_from_filename(path_local)
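A usage example with placeholder names; note that the object key mirrors the local relative path, since blob_path is derived from path_local:

# uploads my_logs/2023/app.log as gs://my-bucket/my_logs/2023/app.log
upload_files('my-bucket', 'my_logs')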
Here is my recursive implementation. We need to create a file named gdrive_utils.py and write the following:
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from apiclient.http import MediaFileUpload, MediaIoBaseDownload
import pickle
import glob
import os

# The following scopes are required for access to Google Drive.
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/drive.metadata.readonly',
          'https://www.googleapis.com/auth/drive.metadata',
          'https://www.googleapis.com/auth/drive',
          'https://www.googleapis.com/auth/drive.file',
          'https://www.googleapis.com/auth/drive.appdata']

def get_gdrive_service():
    """
    Tries to authenticate using a token. If the token has expired or is not present, creates one.
    :return: Returns authenticated service object
    :rtype: object
    """
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'keys/client-secret.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    # return Google Drive API service
    return build('drive', 'v3', credentials=creds)

def createRemoteFolder(drive_service, folderName, parent_id):
    # Create a folder on Drive; returns the newly created folder's ID
    body = {
        'name': folderName,
        'mimeType': "application/vnd.google-apps.folder",
        'parents': [parent_id]
    }
    root_folder = drive_service.files().create(body=body, supportsAllDrives=True, fields='id').execute()
    return root_folder['id']

def upload_file(drive_service, file_location, parent_id):
    # Upload a single file into the given parent folder; returns the new file's ID
    body = {
        'name': os.path.split(file_location)[1],
        'parents': [parent_id]
    }
    media = MediaFileUpload(file_location,
                            resumable=True)
    file_details = drive_service.files().create(body=body,
                                                media_body=media,
                                                supportsAllDrives=True,
                                                fields='id').execute()
    return file_details['id']

def upload_file_recursively(g_drive_service, root, folder_id):
    files_list = glob.glob(root)
    if files_list:
        for file_contents in files_list:
            if os.path.isdir(file_contents):
                # create a new remote folder and recurse into the local one
                new_folder_id = createRemoteFolder(g_drive_service, os.path.split(file_contents)[1],
                                                   folder_id)
                upload_file_recursively(g_drive_service, os.path.join(file_contents, '*'), new_folder_id)
            else:
                # upload to given folder id
                upload_file(g_drive_service, file_contents, folder_id)
After that, use the following:
import os
from gdrive_utils import createRemoteFolder, upload_file_recursively, get_gdrive_service
g_drive_service = get_gdrive_service()
FOLDER_ID_FOR_UPLOAD = "<replace with folder id where you want upload>"
main_folder_id = createRemoteFolder(g_drive_service, '<name_of_main_folder>', FOLDER_ID_FOR_UPLOAD)
And finally use this
upload_file_recursively(g_drive_service, os.path.join("<your_path_>", '*'), main_folder_id)
I just came across the gcsfs library, which also seems to provide a nicer interface.
You can copy an entire directory into a GCS location like this:
import gcsfs

def upload_to_gcs(src_dir: str, gcs_dst: str):
    fs = gcsfs.GCSFileSystem()
    fs.put(src_dir, gcs_dst, recursive=True)
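For example (the bucket and directory names are placeholders):

# recursively copies ./my/local/directory to gs://my_gcp_bucket/foo/bar
upload_to_gcs('./my/local/directory', 'my_gcp_bucket/foo/bar')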
Another option is to use gsutil, the command-line tool for interacting with Google Cloud Storage:
gsutil cp -r ./my/local/directory gs://my_gcp_bucket/foo/bar
The -r flag tells gsutil to copy recursively; see the gsutil documentation for details.
Invoking gsutil from Python can be done like this:

import subprocess

subprocess.check_call(['gsutil', 'cp', '-r', './my/local/directory', 'gs://my_gcp_bucket/foo/bar'])