How to zip a folder in Python with a password?

With pyminizip I am able to zip a single file with a password in Python:
filepath=r"C:\Users\xxx\Desktop\myFolder\file.txt"
import pyminizip
pyminizip.compress(filepath, None,"output.zip", "password", 0)
But how do I zip the whole folder 'myFolder' into a zip file with a password?
I tried removing the filename from the path, but that gives the error
OSError: error in opening C:\Users\xxx\Desktop\myFolder for reading
EDIT:
The link below has a function which will zip the directory, but it won't add a password.
https://www.calazan.com/how-to-zip-an-entire-directory-with-python/
If anyone can let me know whether it is possible to add a password to an existing zip file, that will solve my problem. Is that possible?

I was finally able to encrypt the whole directory (including all the subfolder structure and files) using the 'pyzipper' library suggested by Anupam Chaplot.
Here is the solution:
import os
import sys
import zipfile
import pyzipper

def zip_folderPyzipper(folder_path, output_path):
    """Zip the contents of an entire folder (with that folder included
    in the archive). Empty subfolders will be included in the archive
    as well.
    """
    parent_folder = os.path.dirname(folder_path)
    # Retrieve the paths of the folder contents.
    contents = os.walk(folder_path)
    try:
        zip_file = pyzipper.AESZipFile(output_path, 'w',
                                       compression=pyzipper.ZIP_DEFLATED,
                                       encryption=pyzipper.WZ_AES)
        zip_file.pwd = b'PASSWORD'
        for root, folders, files in contents:
            # Include all subfolders, including empty ones.
            for folder_name in folders:
                absolute_path = os.path.join(root, folder_name)
                relative_path = absolute_path.replace(parent_folder + '\\', '')
                print("Adding '%s' to archive." % absolute_path)
                zip_file.write(absolute_path, relative_path)
            for file_name in files:
                absolute_path = os.path.join(root, file_name)
                relative_path = absolute_path.replace(parent_folder + '\\', '')
                print("Adding '%s' to archive." % absolute_path)
                zip_file.write(absolute_path, relative_path)
        print("'%s' created successfully." % output_path)
    except (IOError, OSError, zipfile.BadZipfile) as message:
        print(message)
        sys.exit(1)
    finally:
        zip_file.close()
Since I am new to Python I can't explain the code in detail. Here are the references:
https://pypi.org/project/pyzipper/
https://www.calazan.com/how-to-zip-an-entire-directory-with-python/
To extract the generated ZIP file on Windows:
Right click -> Unzip (Encrypted)
If you use the built-in Extract All option instead, it will give an error.
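If you want to extract the archive from Python rather than from Explorer, pyzipper mirrors the standard zipfile API; here is a minimal sketch, assuming the archive was created as new_test.zip with the password b'PASSWORD' used above:
import pyzipper

with pyzipper.AESZipFile('new_test.zip') as zf:
    zf.setpassword(b'PASSWORD')
    zf.extractall('extracted_folder')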

Try this:
First check the pyminizip documentation, then try the snippet below.
import pyminizip as pyzip

compression = 8
# The second argument is the path prefix stored inside the archive; None keeps the file at the root.
pyzip.compress("test.txt", None, "test.zip", "Pswrd", compression)

Here is how to copy a whole directory with its subdirectories and files, compress the copy, and encrypt the resulting zip with a password, without needing a separate key file. In this example a MAC address is authorized to perform the decryption, so it is up to you to change or improve the script, but the essentials work very well.
After a lot of research, testing and thinking, I came up with this solution.
My setup: Python 3.8 64-bit on Windows 7 64-bit.
How it works:
First we need the cryptography module (supported platforms and installation notes are here: https://cryptography.io/en/latest/installation/):
pip install cryptography
Then we use the Fernet object from this module
https://cryptography.io/en/latest/fernet/
with a password-derived key
https://cryptography.io/en/latest/fernet/#using-passwords-with-fernet
and shutil:
https://docs.python.org/3/library/shutil.html
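For orientation, here is a minimal sketch of the password-to-Fernet-key pattern that second.py below relies on (second.py uses the machine's MAC address as the password; the password and message here are placeholders):
import base64
import os
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

password = b"my password"   # placeholder secret
salt = os.urandom(16)       # must be kept to re-derive the same key later
# the iteration count is a tunable cost parameter (second.py below uses 100)
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=100000)
key = base64.urlsafe_b64encode(kdf.derive(password))
token = Fernet(key).encrypt(b"secret data")   # decrypt again with Fernet(key).decrypt(token)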
file second.py:
import os
import re, uuid
import string
import shutil
import zlib
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
import base64
import zipfile
class zipy:
    def __init__(self, pathDir=None):
        """If the optional pathDir is None, the script backs up the directory it is executed from."""
        if pathDir != None:
            if os.path.isdir(pathDir):
                pathDir = pathDir.replace(os.sep, '/')
                if pathDir.endswith('/'):
                    self.root = pathDir
                else:
                    self.root = pathDir + '/'
            else:
                self.root = os.getcwd() + os.sep
                self.root = self.root.replace(os.sep, '/')
        else:
            self.root = os.getcwd() + os.sep
            self.root = self.root.replace(os.sep, '/')
        os.chdir(self.root)

        self.name = 'sauvegarde'
        self.dirSauvegarde = self.root + self.name
        self.dirSauvegarde = self.dirSauvegarde.replace(os.sep, '/')

        lectureDossier = os.listdir(self.root)
        print(lectureDossier)
        self.path_system = {}
        for element in lectureDossier:
            if os.path.isdir(element):
                if element != '__pycache__':
                    self.path_system[element] = self.root + element + os.sep.replace(os.sep, '/')
                    self.path_system[element] = self.path_system[element].replace(os.sep, '/')
                else:
                    pass
            elif os.path.isfile(element):
                self.path_system[element] = self.root + element
                self.path_system[element] = self.path_system[element].replace(os.sep, '/')
            else:
                pass
        self.zipi = myZip(self.dirSauvegarde)

    def save(self):
        """Copy everything into the backup folder, zip it, then encrypt the zip."""
        self.createDir(self.dirSauvegarde)
        chemin_src = ""
        chemin_dest = ""
        for element in self.path_system:
            if element != self.dirSauvegarde:
                chemin_src = self.root + element
                chemin_dest = self.dirSauvegarde + os.sep + element
                chemin_dest = chemin_dest.replace(os.sep, '/')
                if os.path.isdir(chemin_src):
                    self.copyDir(chemin_src, chemin_dest)
                else:
                    self.copyFile(chemin_src, chemin_dest)
        self.zipi.zip(zip_exist=True)
        self.delDir(self.dirSauvegarde)

    def copyDir(self, src, dest):
        try:
            shutil.copytree(src, dest, dirs_exist_ok=True)
        except:
            pass

    def copyFile(self, src, dest):
        try:
            shutil.copyfile(src, dest)
        except:
            pass

    def createDir(self, dirPath):
        if os.path.isdir(dirPath):
            self.delDir(dirPath)
        else:
            pass
        os.makedirs(dirPath, exist_ok=True)

    def delDir(self, dir):
        if os.path.isdir(dir):
            if len(os.listdir(dir)) > 0:
                try:
                    print('rmtree')
                    shutil.rmtree(dir, ignore_errors=True)
                except:
                    pass
            else:
                try:
                    os.rmdir(dir)
                except:
                    pass

    def decrypt(self):
        self.zipi.unzip()


class myZip:
    def __init__(self, dir):
        self.pathDir = dir
        self.nom = os.path.basename(dir)
        self.pathZip = self.pathDir + '.zip'
        self.crypt = Encryptor()

    def zip(self, zip_exist=False):
        if zip_exist == False:
            pass
        else:
            if os.path.isfile(self.pathZip):
                try:
                    os.remove(self.pathZip)
                except:
                    pass
        shutil.make_archive(os.path.splitext(self.pathZip)[0], 'zip', self.pathDir)
        key = self.crypt.key_create()
        self.crypt.file_encrypt(key, self.pathZip, self.pathZip)
        self.crypt.key_write(self.pathZip, key)

    def unzip(self):
        if self.crypt.checkPass(self.pathZip):
            # MAC address authorized
            key = self.crypt.key_load(self.pathZip)
            self.crypt.file_decrypt(key, self.pathZip, self.pathZip)
        else:
            print('MAC address not authorized')


class Encryptor:
    def __init__(self):
        self.salto = None

    def key_create(self):
        password = self.getMac()
        password = bytes(password, encoding="utf-8")
        self.salto = os.urandom(16)
        print(self.salto)
        kdf = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=self.salto,
            iterations=100,
        )
        key = base64.urlsafe_b64encode(kdf.derive(password))
        return key

    def key_write(self, pathZip, key):
        with zipfile.ZipFile(pathZip, 'a') as zip:
            zip.comment = key + bytes(' byMe ', encoding="utf-8") + self.salto

    def key_load(self, pathZip):
        stri = []
        with zipfile.ZipFile(pathZip, 'a') as zip:
            stri = zip.comment.split(b' byMe ')
        print(stri[0])
        print(stri[1])
        key = stri[0]
        self.salto = stri[1]
        return key

    def checkPass(self, pathZip):
        key = base64.urlsafe_b64decode(self.key_load(pathZip))
        salt = self.salto
        mdp = self.getMac()
        mdp = bytes(mdp, encoding="utf-8")
        kdf = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=salt,
            iterations=100,
        )
        retour = False
        try:
            kdf.verify(mdp, key)
            retour = True
        except:
            retour = False
        return retour

    def file_encrypt(self, key, original_file, encrypted_file):
        f = Fernet(key)
        with open(original_file, 'rb') as file:
            original = file.read()
        encrypted = f.encrypt(original)
        with open(encrypted_file, 'wb') as file:
            file.write(encrypted)

    def file_decrypt(self, key, encrypted_file, decrypted_file):
        f = Fernet(key)
        with open(encrypted_file, 'rb') as file:
            encrypted = file.read()
        decrypted = f.decrypt(encrypted)
        with open(decrypted_file, 'wb') as file:
            file.write(decrypted)

    def getMac(self):
        return "".join(re.findall('..', '%012x' % uuid.getnode()))
Use it like this:
file main.py:
from second import zipy

# If no argument is given, the script backs up the directory it is executed from;
# otherwise it works on the absolute path given as argument and writes the zip there.
dd = zipy("E:/path")
# or dd = zipy("E:/path/"), or dd = zipy(); if you pass an argument, give an absolute path

# Copy everything, zip it and encrypt the zip. Modify second.py if you prefer to pass
# a password directly instead of deriving it from the MAC address.
dd.save()

# Decrypt the zip
dd.decrypt()

Here's a snippet with pyminizip: it collects the list of files recursively and zips the whole thing.
import pyminizip
import os

def get_paths_recursively(src_root_path):
    files = []
    if src_root_path is not None:
        for root, directories, filenames in os.walk(src_root_path):
            for filename in filenames:
                full_file_name = os.path.join(root, filename)
                if os.path.isfile(full_file_name) and not filename.startswith('.'):
                    files.append(full_file_name)
    return files

def pyminizip_zipper(folder_path, output_path, password):
    paths = get_paths_recursively(folder_path)
    roots = []
    for path in paths:
        roots.append(os.path.dirname(path.replace(os.path.dirname(folder_path), './')))
    pyminizip.compress_multiple(paths, roots, output_path, password, 5)
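A usage sketch, reusing the folder from the question (the output name and password are placeholders):
pyminizip_zipper(r"C:\Users\xxx\Desktop\myFolder", "output.zip", "password")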


Python - How to create a csv, if csv already exists [duplicate]

Does Python have any built-in functionality to add a number to a filename if it already exists?
My idea is that it would work the way certain OSes do: if a file is written to a directory where a file of that name already exists, it would append a number to the name or increment it.
I.e. if "file.pdf" exists it would create "file2.pdf", and next time "file3.pdf".
I ended up writing my own simple function for this. Primitive, but gets the job done:
import os

def uniquify(path):
    filename, extension = os.path.splitext(path)
    counter = 1

    while os.path.exists(path):
        path = filename + " (" + str(counter) + ")" + extension
        counter += 1

    return path
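For example, if file.pdf already exists but "file (1).pdf" does not:
print(uniquify("file.pdf"))  # -> 'file (1).pdf'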
In a way, Python has this functionality built into the tempfile module. Unfortunately, you have to tap into a private global variable, tempfile._name_sequence. This means that officially, tempfile makes no guarantee that in future versions _name_sequence even exists -- it is an implementation detail.
But if you are okay with using it anyway, this shows how you can create uniquely named files of the form file#.pdf in a specified directory such as /tmp:
import tempfile
import itertools as IT
import os

def uniquify(path, sep=''):
    def name_sequence():
        count = IT.count()
        yield ''
        while True:
            yield '{s}{n:d}'.format(s=sep, n=next(count))
    orig = tempfile._name_sequence
    with tempfile._once_lock:
        tempfile._name_sequence = name_sequence()
        path = os.path.normpath(path)
        dirname, basename = os.path.split(path)
        filename, ext = os.path.splitext(basename)
        fd, filename = tempfile.mkstemp(dir=dirname, prefix=filename, suffix=ext)
        tempfile._name_sequence = orig
    return filename

print(uniquify('/tmp/file.pdf'))
I was trying to implement the same thing in my project, but unutbu's answer seemed too 'heavy' for my needs, so I finally came up with the following code:
import os

index = ''
while True:
    try:
        os.makedirs('../hi' + index)
        break
    except WindowsError:
        if index:
            index = '(' + str(int(index[1:-1]) + 1) + ')'  # Append 1 to number in brackets
        else:
            index = '(1)'
        pass  # Go and try to create the file again
Just in case someone stumbled upon this and requires something simpler.
If all files being numbered isn't a problem, and you know beforehand the name of the file to be written, you could simply do:
import os

counter = 0
filename = "file{}.pdf"
while os.path.isfile(filename.format(counter)):
    counter += 1
filename = filename.format(counter)
Recently I encountered the same thing; here is my approach:
import os

file_name = "file_name.txt"
if os.path.isfile(file_name):
    expand = 1
    while True:
        expand += 1
        new_file_name = file_name.split(".txt")[0] + str(expand) + ".txt"
        if os.path.isfile(new_file_name):
            continue
        else:
            file_name = new_file_name
            break
Let's say some of those files already exist (for the example output below, assume foo.txt, foo_1.txt, foo_2.txt and bar.txt exist, while baz.txt does not).
This function generates the next available non-already-existing filename by adding a _1, _2, _3, ... suffix before the extension if necessary:
import os

def nextnonexistent(f):
    fnew = f
    root, ext = os.path.splitext(f)
    i = 0
    while os.path.exists(fnew):
        i += 1
        fnew = '%s_%i%s' % (root, i, ext)
    return fnew

print(nextnonexistent('foo.txt'))  # foo_3.txt
print(nextnonexistent('bar.txt'))  # bar_1.txt
print(nextnonexistent('baz.txt'))  # baz.txt
Since the tempfile hack A) is a hack and B) still requires a decent amount of code anyway, I went with a manual implementation. You basically need:
A way to safely create a file if and only if it does not exist (this is what the tempfile hack affords us).
A generator for filenames.
A wrapping function to hide the mess.
I defined a safe_open that can be used just like open:
import errno
import itertools
import os
import platform

def iter_incrementing_file_names(path):
    """
    Iterate incrementing file names. Start with path and add " (n)" before the
    extension, where n starts at 1 and increases.

    :param path: Some path
    :return: An iterator.
    """
    yield path
    prefix, ext = os.path.splitext(path)
    for i in itertools.count(start=1, step=1):
        yield prefix + ' ({0})'.format(i) + ext

def safe_open(path, mode):
    """
    Open path, but if it already exists, add " (n)" before the extension,
    where n is the first number found such that the file does not already
    exist.

    Returns an open file handle. Make sure to close!

    :param path: Some file name.
    :return: Open file handle... be sure to close!
    """
    flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY

    if 'b' in mode and platform.system() == 'Windows':
        flags |= os.O_BINARY

    for filename in iter_incrementing_file_names(path):
        try:
            file_handle = os.open(filename, flags)
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                raise
        else:
            return os.fdopen(file_handle, mode)

# Example
with safe_open("some_file.txt", "w") as fh:
    print("Hello", file=fh)
I haven't tested this yet but it should work, iterating over possible filenames until the file in question does not exist at which point it breaks.
import itertools
import os

def increment_filename(fn):
    fn, extension = os.path.splitext(fn)
    yield fn + extension
    for n in itertools.count(start=1, step=1):
        yield '%s%d%s' % (fn, n, extension)

for filename in increment_filename(original_filename):
    if not os.path.isfile(filename):
        break
This works for me.
The initial file name is 0.yml; if it already exists, the number is incremented until an unused name is found.
import os
import itertools

def increment_filename(file_name):
    fid, extension = os.path.splitext(file_name)
    yield fid + extension
    for n in itertools.count(start=1, step=1):
        new_id = int(fid) + n
        yield "%s%s" % (new_id, extension)

def get_file_path():
    target_file_path = None
    for file_name in increment_filename("0.yml"):
        file_path = os.path.join('/tmp', file_name)
        if not os.path.isfile(file_path):
            target_file_path = file_path
            break
    return target_file_path
import os

class Renamer():
    def __init__(self, name):
        self.extension = name.split('.')[-1]
        self.name = name[:-len(self.extension)-1]
        self.filename = self.name

    def rename(self):
        i = 1
        if os.path.exists(self.filename+'.'+self.extension):
            while os.path.exists(self.filename+'.'+self.extension):
                self.filename = '{} ({})'.format(self.name, i)
                i += 1
        return self.filename+'.'+self.extension
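A usage sketch with a hypothetical file name:
new_name = Renamer('report.pdf').rename()  # 'report (1).pdf' if report.pdf already exists, else 'report.pdf'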
I found that the os.path.exists() conditional function did what I needed. I'm using a dictionary-to-csv saving as an example, but the same logic could work for any file type:
import os
import numpy as np

def smart_save(filename, dictionary):
    od = filename + '_'  # added underscore before number for clarity
    for i in np.arange(0, 500, 1):  # I set an arbitrary upper limit of 500
        d = od + str(i)
        if os.path.exists(d + '.csv'):
            pass
        else:
            with open(d + '.csv', 'w') as f:  # or any saving operation you need
                for key in dictionary.keys():
                    f.write("%s,%s\n" % (key, dictionary[key]))
            break
Note: this appends a number (starting at 0) to the file name by default, but it's easy to shift that around.
This function checks whether the file name already exists, using a regular expression and recursion:
import os
import re

def validate_outfile_name(input_path):
    filename, extension = os.path.splitext(input_path)
    if os.path.exists(input_path):
        output_path = ""
        pattern = r'\([0-9]\)'
        match = re.search(pattern, filename)
        if match:
            version = filename[match.start() + 1]
            try:
                new_version = int(version) + 1
            except:
                new_version = 1
            output_path = f"{filename[:match.start()]}({new_version}){extension}"
            output_path = validate_outfile_name(output_path)
        else:
            version = 1
            output_path = f"{filename}({version}){extension}"
        return output_path
    else:
        return input_path
I've implemented a similar solution with pathlib:
It creates file names that match the pattern path/<file-name>-\d\d.ext. Perhaps this solution can help...
import pathlib
from toolz import itertoolz as itz

def file_exists_add_number(path_file_name, digits=2):
    pfn = pathlib.Path(path_file_name)
    parent = pfn.parent   # parent dir of the file
    stem = pfn.stem       # file name w/o extension
    suffix = pfn.suffix   # NOTE: extension starts with '.' (dot)!
    try:
        # search for files ending with '-\d\d.ext'
        last_file = itz.last(parent.glob(f"{stem}-{digits * '?'}{suffix}"))
    except:
        curr_no = 1
    else:
        curr_no = int(last_file.stem[-digits:]) + 1
    # int to string and add leading zeros
    curr_no = str(curr_no).zfill(digits)
    path_file_name = parent / f"{stem}-{curr_no}{suffix}"
    return str(path_file_name)
Please note: this solution starts at 01 and will only find existing files matching the -\d\d pattern!
import os

def create_file():
    counter = 0
    filename = "file"
    while os.path.isfile(f"dir/{filename}{counter}.txt"):
        counter += 1
    print(f"{filename}{counter}.txt")
A little bit late, but something like this should still work properly; maybe it will be useful for someone.
You can use a simple counter loop to do this (an image downloader as an example):
import requests

def image_downloader():
    image_url = 'some_image_url'
    for count in range(10):
        image_data = requests.get(image_url).content
        with open(f'image_{count}.jpg', 'wb') as handler:
            handler.write(image_data)
Files will increment properly. Result is:
image.jpg
image_0.jpg
image_1.jpg
image_2.jpg
image_3.jpg
image_4.jpg
image_5.jpg
image_6.jpg
image_7.jpg
image_8.jpg
image_9.jpg
An easy way to create a new file when that name already exists in your folder:
import os

# wb is assumed to be an existing openpyxl Workbook
if 'sample.xlsx' in os.listdir('testdir/'):
    i = 2
    while os.path.exists(f'testdir/sample ({i}).xlsx'):
        i += 1
    wb.save(filename=f"testdir/sample ({i}).xlsx")
else:
    wb.save(filename=f"testdir/sample.xlsx")

python script searching for a string in files in directory and subdirectories

I have a python script that searches for a string in files in a directory and its subdirectories.
import os
from sys import argv

print(argv)
searchStr = argv[1]

def searchDir(dirCurrent):
    try:
        main_directory = os.listdir(dirCurrent)
        for item in main_directory:
            item_path = os.path.join(dirCurrent, item)
            if os.path.isdir(item_path) == True:
                searchDir(item_path)
            else:
                f = open(item_path, 'r')
                file_contents = f.read()
                if searchStr in file_contents:
                    print("found in file " + item_path)
    except:
        print("Unable to access the directory " + dirCurrent)

searchDir("C:\\Users\\myname-adm\\Documents")
It runs, but when it encounters folders without read permissions, the script stops. How can I modify it so it can keep on searching while skipping the folders without read access?
Thank you for your help.
This should do the trick:
import os
from sys import argv

print(argv)
searchStr = argv[1]

def searchDir(dirCurrent):
    main_directory = os.listdir(dirCurrent)
    for item in main_directory:
        try:
            item_path = os.path.join(dirCurrent, item)
            if os.path.isdir(item_path) == True:
                searchDir(item_path)
            else:
                f = open(item_path, 'r')
                file_contents = f.read()
                if searchStr in file_contents:
                    print("found in file " + item_path)
        except:
            print("Unable to access the directory " + dirCurrent)

searchDir("C:\\Users\\myname-adm\\Documents")

What is the right way to refer bazel data files in python?

Suppose I have the following BUILD file
py_library(
    name = "foo",
    srcs = ["foo.py"],
    data = ["//bar:data.json"],
)
How should I refer to data.json in the foo.py file? I'd like to have something like the snippet below; what should I use for some_path?
with open(os.path.join(some_path, "bar/data.json"), 'r') as fp:
    data = json.load(fp)
I couldn't find much general documentation about *.runfiles online -- any pointer will be appreciated!
Short answer: os.path.dirname(__file__)
Here is the full example:
$ ls
bar/ BUILD foo.py WORKSPACE
$ cat BUILD
py_binary(
    name = "foo",
    srcs = ["foo.py"],
    data = ["//bar:data.json"],
)
$ cat foo.py
import json
import os

ws = os.path.dirname(__file__)
with open(os.path.join(ws, "bar/data.json"), 'r') as fp:
    print(json.load(fp))
$ cat bar/BUILD
exports_files(["data.json"])
$ bazel run :foo
Edit: it doesn't work well when your package is in a subdirectory. You may need to go back using os.path.dirname.
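For example, a sketch for the hypothetical case where foo.py lives one package level below the workspace root (say pkg/foo.py): strip one extra path component per level.
import os

# one extra os.path.dirname per package level between foo.py and the workspace root
ws = os.path.dirname(os.path.dirname(__file__))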
Here is a function that should return the path to the runfiles root for any py_binary in all the cases that I'm aware of:
import os
import re
import sys

def find_runfiles():
    """Find the runfiles tree (useful when _not_ run from a zip file)"""
    # Follow symlinks, looking for my module space
    stub_filename = os.path.abspath(sys.argv[0])
    while True:
        # Found it?
        module_space = stub_filename + '.runfiles'
        if os.path.isdir(module_space):
            break

        runfiles_pattern = r"(.*\.runfiles)"
        matchobj = re.match(runfiles_pattern, os.path.abspath(sys.argv[0]))
        if matchobj:
            module_space = matchobj.group(1)
            break

        raise RuntimeError('Cannot find .runfiles directory for %s' %
                           sys.argv[0])
    return module_space
For the example in your question you could use it like so:
with open(os.path.join(find_runfiles(), "name_of_workspace/bar/data.json"), 'r') as fp:
    data = json.load(fp)
Note that this function won't help if you build zipped executables of your python apps (using subpar, probably); for those you will need some more code. This next snippet includes get_resource_filename() and get_resource_directory(), which will work for both regular py_binary and .par binaries:
import atexit
import os
import re
import shutil
import sys
import tempfile
import zipfile

def get_resource_filename(path):
    zip_path = get_zip_path(sys.modules.get("__main__").__file__)
    if zip_path:
        tmpdir = tempfile.mkdtemp()
        atexit.register(lambda: shutil.rmtree(tmpdir, ignore_errors=True))
        zf = BetterZipFile(zip_path)
        zf.extract(member=path, path=tmpdir)
        return os.path.join(tmpdir, path)
    elif os.path.exists(path):
        return path
    else:
        path_in_runfiles = os.path.join(find_runfiles(), path)
        if os.path.exists(path_in_runfiles):
            return path_in_runfiles
        else:
            raise ResourceNotFoundError

def get_resource_directory(path):
    """Find or extract an entire subtree and return its location."""
    zip_path = get_zip_path(sys.modules.get("__main__").__file__)
    if zip_path:
        tmpdir = tempfile.mkdtemp()
        atexit.register(lambda: shutil.rmtree(tmpdir, ignore_errors=True))
        zf = BetterZipFile(zip_path)
        members = []
        for fn in zf.namelist():
            if fn.startswith(path):
                members += [fn]
        zf.extractall(members=members, path=tmpdir)
        return os.path.join(tmpdir, path)
    elif os.path.exists(path):
        return path
    else:
        path_in_runfiles = os.path.join(find_runfiles(), path)
        if os.path.exists(path_in_runfiles):
            return path_in_runfiles
        else:
            raise ResourceNotFoundError

def get_zip_path(path):
    """If path is inside a zip file, return the zip file's path."""
    if path == os.path.sep:
        return None
    elif zipfile.is_zipfile(path):
        return path
    return get_zip_path(os.path.dirname(path))

class ResourceNotFoundError(RuntimeError):
    pass

def find_runfiles():
    """Find the runfiles tree (useful when _not_ run from a zip file)"""
    # Follow symlinks, looking for my module space
    stub_filename = os.path.abspath(sys.argv[0])
    while True:
        # Found it?
        module_space = stub_filename + '.runfiles'
        if os.path.isdir(module_space):
            break

        runfiles_pattern = r"(.*\.runfiles)"
        matchobj = re.match(runfiles_pattern, os.path.abspath(sys.argv[0]))
        if matchobj:
            module_space = matchobj.group(1)
            break

        raise RuntimeError('Cannot find .runfiles directory for %s' %
                           sys.argv[0])
    return module_space

class BetterZipFile(zipfile.ZipFile):
    """Shim around ZipFile that preserves permissions on extract."""

    def extract(self, member, path=None, pwd=None):
        if not isinstance(member, zipfile.ZipInfo):
            member = self.getinfo(member)
        if path is None:
            path = os.getcwd()
        ret_val = self._extract_member(member, path, pwd)
        attr = member.external_attr >> 16
        os.chmod(ret_val, attr)
        return ret_val
Using this second code snippet, your example would look like:
with open(get_resource_filename("name_of_workspace/bar/data.json"), 'r') as fp:
    data = json.load(fp)

How to unzip a file with Python 2.4?

I'm having a hard time figuring out how to unzip a zip file with 2.4. extract() is not included in 2.4. I'm restricted to using 2.4.4 on my server.
Can someone please provide a simple code example?
You have to use namelist() and extract(). Here is a sample that takes directories into account:
import zipfile
import os.path
import os

zfile = zipfile.ZipFile("test.zip")
for name in zfile.namelist():
    (dirname, filename) = os.path.split(name)
    print "Decompressing " + filename + " on " + dirname
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    zfile.extract(name, dirname)
There's some problem with Vinko's answer (at least when I run it). I got:
IOError: [Errno 13] Permission denied: '01org-webapps-countingbeads-422c4e1/'
Here's how to solve it:
import os
import zipfile

# unzip a file
def unzip(path):
    zfile = zipfile.ZipFile(path)
    for name in zfile.namelist():
        (dirname, filename) = os.path.split(name)
        if filename == '':
            # directory
            if not os.path.exists(dirname):
                os.mkdir(dirname)
        else:
            # file
            fd = open(name, 'w')
            fd.write(zfile.read(name))
            fd.close()
    zfile.close()
Modifying Ovilia's answer so that you can specify the destination directory as well:
def unzip(zipFilePath, destDir):
    zfile = zipfile.ZipFile(zipFilePath)
    for name in zfile.namelist():
        (dirName, fileName) = os.path.split(name)
        if fileName == '':
            # directory
            newDir = destDir + '/' + dirName
            if not os.path.exists(newDir):
                os.mkdir(newDir)
        else:
            # file
            fd = open(destDir + '/' + name, 'wb')
            fd.write(zfile.read(name))
            fd.close()
    zfile.close()
Not fully tested, but it should be okay:
import os
from zipfile import ZipFile, ZipInfo

class ZipCompat(ZipFile):
    def __init__(self, *args, **kwargs):
        ZipFile.__init__(self, *args, **kwargs)

    def extract(self, member, path=None, pwd=None):
        if not isinstance(member, ZipInfo):
            member = self.getinfo(member)
        if path is None:
            path = os.getcwd()
        return self._extract_member(member, path)

    def extractall(self, path=None, members=None, pwd=None):
        if members is None:
            members = self.namelist()
        for zipinfo in members:
            self.extract(zipinfo, path)

    def _extract_member(self, member, targetpath):
        if (targetpath[-1:] in (os.path.sep, os.path.altsep)
                and len(os.path.splitdrive(targetpath)[1]) > 1):
            targetpath = targetpath[:-1]
        if member.filename[0] == '/':
            targetpath = os.path.join(targetpath, member.filename[1:])
        else:
            targetpath = os.path.join(targetpath, member.filename)
        targetpath = os.path.normpath(targetpath)
        upperdirs = os.path.dirname(targetpath)
        if upperdirs and not os.path.exists(upperdirs):
            os.makedirs(upperdirs)
        if member.filename[-1] == '/':
            if not os.path.isdir(targetpath):
                os.mkdir(targetpath)
            return targetpath
        target = file(targetpath, "wb")
        try:
            target.write(self.read(member.filename))
        finally:
            target.close()
        return targetpath
I am testing in Python 2.7.3rc2, and ZipFile.namelist() is not returning an entry with just the subdirectory name (for creating a subdirectory), only a list of file names with their subdirectory, as follows:
['20130923104558/control.json', '20130923104558/test.csv']
Thus the check
if fileName == '':
does not evaluate to True at all.
So I modified the code to check whether dirName exists inside destDir and to create it if it does not. The file is extracted only if the fileName part is not empty. This should also handle the case where a bare directory name does appear in ZipFile.namelist().
def unzip(zipFilePath, destDir):
    zfile = zipfile.ZipFile(zipFilePath)
    for name in zfile.namelist():
        (dirName, fileName) = os.path.split(name)
        # Check if the directory exists
        newDir = destDir + '/' + dirName
        if not os.path.exists(newDir):
            os.mkdir(newDir)
        if not fileName == '':
            # file
            fd = open(destDir + '/' + name, 'wb')
            fd.write(zfile.read(name))
            fd.close()
    zfile.close()

Problem with file cleaning process?

I am working on a very large file system. My task is to clean the system according to some given parameters. The program fragment below gives an idea.
import DirectoryWalker

extentions_to_delete = list([".rar", ".doc", ".URL", ".js", ".EXE", ".mht", ".css", ".txt", ".cache", ".xml"])
extentions_to_copy = list([".jpg", ".BMP", ".GIF", ".jpeg", ".gif", ".bmp", ".png", ".JPG"])

dw = DirectoryWalker.DirectoryWalker("/media/08247451247443AA/home/crap/")

def copy_advice(key, files):
    for ext in extentions_to_copy:
        if(ext == key):
            print str(len(files)) + " Files of type " + key + " should be copied to the target folder."
            for file in files:
                copy_to = "/media/08247451247443AA/home/crap-pics/"
                moved = dw.move_file_to(file, copy_to, True)
                if not moved:
                    print file + " : not moved"

walks = dw.get_all_file_types()
for key in DirectoryWalker.Walk.store.keys():
    files = DirectoryWalker.Walk.store[key]
    copy_advice(key, files)
The following code is in DirectoryWalker. Walk is a simple class which has a store object.
def get_all_file_types(self):
    extentions = []
    for dirpath, dirnames, filenames in os.walk(self.dir_name):
        for file in filenames:
            extentions.append(Walk(dirpath + "/" + file))
    return extentions

def move_file_to(self, file_path, copy_to, rename_if_exists=False):
    file_name = os.path.split(file_path)[1]
    target_file_name = copy_to + file_name
    coppied = False
    if not os.path.isfile(target_file_name):
        coppied = True
        try:
            os.rename(file_path, target_file_name)
        except OSError:
            coppied = False
            print "Oops! Unable to rename : " + file_path + " to target : " + target_file_name
    if rename_if_exists:
        coppied = True
        file_name = "new_" + file_name
        try:
            os.rename(file_path, target_file_name)
        except OSError:
            coppied = False
            print "Oops! Unable to rename : " + file_path + " to target : " + target_file_name
    return coppied
The Walk class
class Walk:
    store = dict([])

    def __init__(self, filename):
        self.file_ext = os.path.splitext(filename)[-1]
        self.file_name = filename
        if not (Walk.store.has_key(self.file_ext)):
            Walk.store[self.file_ext] = list()
        Walk.store[self.file_ext].append(self.file_name)
But when the program is executed, it only moves about 10400 files. A manual calculation suggests there should be 13400 files in the file system. Please let me know what I am doing wrong.
Update - Solution
After a careful investigation, I found that there are many ambiguous (duplicate) file names in the file system, and those files were the ones missing from the target.
To answer your question, why not start with a simpler piece of code to test?
import os

all_files = []
for root, dirs, files in os.walk('/media/08247451247443AA/home/crap/'):
    all_files.extend(files)
print len(all_files)
As a side note, could you replace the Walk class with a defaultdict?
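A sketch of that suggestion, grouping file paths by extension with collections.defaultdict instead of the class-level store dict:
import os
from collections import defaultdict

store = defaultdict(list)
for dirpath, dirnames, filenames in os.walk('/media/08247451247443AA/home/crap/'):
    for name in filenames:
        store[os.path.splitext(name)[-1]].append(os.path.join(dirpath, name))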
