python check folder path with * - python

checking folder path with "*"
here is what i have tried.
import os
checkdir = "/lib/modules/*/kernel/drivers/char"
path = os.path.exists(checkdir)
print path
False
this will always print False; is there another way that I can make it print True?
i know when i put static path, it can
import os
checkdir = "/lib/modules"
path = os.path.exists(checkdir)
print path
I can't put a static path, because I have a few Linux OSes that use different kernel
versions, therefore the number will not be the same.

This may not solve your problem, but a while ago I needed to be able to recurse an unknown number of times through a directory structure to find a file, so I wrote this function:
import os
import fnmatch
def get_files(root, patterns='*', depth=1, yield_folders=False):
    """
    Return all of the files matching patterns, under root.

    Only searches to the specified depth.

    Arguments:
        root - Top level directory for search
        patterns - semicolon separated list of Unix style
                   wildcards to match. NOTE: This is not
                   a regular expression.
        depth - level of subdirectory search, default
                1 level down
        yield_folders - True if folder names should be returned,
                        default False

    Returns:
        generator of files meeting search criteria for all
        patterns in argument patterns
    """
    # Depth of the starting directory, measured in path components.
    root_depth = len(root.split(os.sep))
    # The pattern list is ';'-separated (the original docstring wrongly
    # said comma separated).
    pattern_list = patterns.split(';')
    for path, subdirs, files in os.walk(root):
        # Depth is a property of the directory, not of each file: compute it
        # once per directory instead of once per file/pattern pair.
        cur_depth = len(path.split(os.sep))
        if (cur_depth - root_depth) > depth:
            # Prune the walk: nothing below this directory can be in range.
            subdirs[:] = []
            continue
        if yield_folders:
            files.extend(subdirs)
        files.sort()
        for name in files:
            # Yield each entry at most once, even if several patterns match.
            if any(fnmatch.fnmatch(name, pattern) for pattern in pattern_list):
                yield os.path.join(path, name)
With this function you could check to see if a file exists with the following:
checkdir = "/lib/modules/*/kernel/drivers/char"
matches = get_files(checkdir, depth=100, yield_folders=True)
found = any(matches)
(Note: a generator object is always truthy, so the original `found = True if matches else False` would be True even when nothing matched; `any()` actually consumes the generator and reports whether it produced anything.)
All this may be overkill, but it should work!

Related

python: How to get latest file in a directory with certain pattern

I want the latest file in a directory with certain pattern. I can find the latest file but don't know how to include pattern. Please try to propose solution involving os library only.
def newest(DIR_PATH):
    """Return the path of the file in DIR_PATH with the greatest ctime."""
    entries = os.listdir(DIR_PATH)
    full_paths = [os.path.join(DIR_PATH, entry) for entry in entries]
    return max(full_paths, key=os.path.getctime)
The directory is having many kinds of files. For example consider below two kind of files.
xyz-2019-11-17_01-25-14.json
xyz-2019-11-17_01-25-14-trimmed.json
I want to get the latest file that does not end with '-trimmed.json'. Please suggest.
You could simply go like this:
def newest(DIR_PATH):
    """Return the newest (greatest ctime) file in DIR_PATH, skipping
    files whose name ends with '-trimmed.json'.

    Raises ValueError if the directory holds no eligible files.
    """
    entries = os.listdir(DIR_PATH)
    # Match the full '-trimmed.json' suffix: a bare endswith("trimmed.json")
    # would also (wrongly) drop names such as 'untrimmed.json'.
    candidates = [os.path.join(DIR_PATH, name) for name in entries
                  if not name.endswith("-trimmed.json")]
    return max(candidates, key=os.path.getctime)
you could probably use
import os
from pathlib import Path as makePath
def find_youngest(path, pattern, n=1):
    """
    Find the file(s) that match a glob pattern and have the highest
    modification timestamps.

    input:
        path, string or pathlib.Path, where to look for the file(s)
        pattern, string, glob pattern (not a regex) to match filenames
        n, integer, how many to return. defaults to 1

    returns:
        list of up to n matching file paths, newest first,
        or None if no file matches.

    raises:
        ValueError if n < 1.
    """
    # A plain `assert` disappears under `python -O`, so validate explicitly.
    if n < 1:
        raise ValueError("n must be greater equal 1.")
    base = makePath(path)
    matches = [makePath(f) for f in base.glob(pattern) if os.path.isfile(f)]
    # Newest (largest mtime) first; key can be the function itself, no lambda.
    matches.sort(key=os.path.getmtime, reverse=True)
    if matches:
        return matches[:n]
    return None
Note: pathlib.Path.glob uses Unix shell-style glob wildcards (such as `*` and `**`) for string matching — not regular expressions.
A simple way to select files based on the occurrence of a specific filename ending could be
files = ['xyz-2019-11-17_01-25-14.json',
'xyz-2019-11-17_01-25-14-trimmed.json']
select = [f for f in files if not f.endswith('-trimmed.json')]
# select
# Out[35]: ['xyz-2019-11-17_01-25-14.json']
There's a library called glob. Check it out.
https://docs.python.org/3/library/glob.html
This solution is almost same as what was given here by "Florian H" with one minor difference, if the pattern is somewhere in between the filename where endswith is not relevant.
def newest(DIR_PATH):
    """Return the newest (greatest ctime) file in DIR_PATH whose name does
    not contain the substring 'trimmed' anywhere."""
    names = os.listdir(DIR_PATH)
    kept = [os.path.join(DIR_PATH, name) for name in names if "trimmed" not in name]
    return max(kept, key=os.path.getctime)

Delete certain files from a directory using regex regarding a specific detail in file names

Here I am making attempts to create a code which would delete files in a folder according to the mask. All files what include 17 should be removed and the general format of files in folder is ??_????17*.* where ? - Any symbol 1..n,A..z; * - any length of symbols; _ and 17 - are in any files (other files contain 18, as well) and its extension doesn't matter. Certain example of a files from folder: AB_DEFG17Something.Anything - Copy (2).txt; AB_DEFG18Something.Some - Copy (3).txt...
p.s. apologies for the previous insufficient and inexact explanation. You were right about glob.glob in case the files are named similarly.
Would be glad to receive points of view about this task; I hope it will be useful for someone else.
import os
import re
dir_name = "/Python/Test_folder" # open the folder and read files
testfolder = os.listdir(dir_name)
def matching(r, s): # condition if there's nothing to match
    """Search string *s* for regex *r*.

    NOTE(review): quoted from the question as-is. It returns the message
    "Files don't exist!" when a match IS found and None otherwise — the
    name and message suggest the intended logic is inverted. At the call
    site below it is passed a list, on which re.search would raise
    TypeError.
    """
    match = re.search(r, s)
    if match:
        return "Files don't exist!"
# NOTE(review): quoted from the question as-is — this fragment is broken:
# `testfolder.index(matching)` raises ValueError (a function object is not in
# the directory listing), strings have no callable `.name`, and `s` is
# undefined at this point. The corrected approach is in the answer below.
matching(r'^\w\w\[_]\w\w\w\w\[1]\[7]\w+\[.]\w+', testfolder) # matching the mask of files
for item in testfolder.index(matching):
    if item.name(matching, s):
        os.remove(os.path.join(dir_name, item))
# format of filenames not converted : ??_????17*.*
All files in a folder with pattern ??_????17*.* will be deleted with this code:
import os
import re
dir_name = "/Python/Test_folder" # open the folder and read files
testfolder = os.listdir(dir_name)
# Mask ??_????17*.* — two word chars, '_', four word chars, the literal '17',
# then the rest of the name, a dot, and an extension.
p = re.compile(r'^[1-9\w]{2}_[1-9\w]{4}[1][7][\w]+\.[\w]+')
for each in testfolder:
    # PEP 8: comparisons to None use identity (`is None`), never `== None`.
    if p.match(each) is None:
        continue
    os.remove(os.path.join(dir_name, each))
Hope this is what you need.

Sanitizing a file path in python

I have a file browser application that exposes a directory and its contents to the users.
I want to sanitize the user input, which is a file path, so that it does not allow absolute paths such as '/tmp/' and relative paths such as '../../etc'
Is there a python function that does this across platforms?
Also for people searching for a way to get rid of A/./B -> A/B and A/B/../C -> A/C in paths.
You can use os.path.normpath for that.
A comprehensive filepath sanitiser for python
I wasn't really satisfied with any of the available methods for sanitising a path, so I wrote my own, relatively comprehensive path sanitiser. This is suitable* for taking input from a public endpoint (http upload, REST endpoint, etc) and ensuring that if you save data at the resulting file path, it will not damage your system**. (Note: this code targets Python 3+, you'll probably need to make some changes to make it work on 2.x)
* No guarantees! Please don't rely on this code without checking it thoroughly yourself.
** Again, no guarantees! You could still do something crazy and set your root path on a *nix system to /dev/ or /bin/ or something like that. Don't do that. There are also some edge cases on Windows that could cause damage (device file names, for example), you could check the secure_filename method from werkzeug's utils for a good start on dealing with these if you're targeting Windows.
How it works
You need to specify a root path, the sanitiser will ensure that all paths returned are under this root. Check the get_root_path function for where to do this. Make sure the value for the root path is from your own configuration, not input from the user!
There is a file name sanitiser which:
Converts unicode to ASCII
Converts path separators to underscores
Only allows certain characters from a whitelist in the file name. The whitelist includes all lower and uppercase letters, all digits, the hyphen, the underscore, the space, opening and closing round brackets and the full stop character (period). You can customise this whitelist if you want to.
Ensures all names have at least one letter or number (to avoid names like '..')
To get a valid file path, you should call make_valid_file_path. You can optionally pass it a subdirectory path in the path parameter. This is the path underneath the root path, and can come from user input. You can optionally pass it a file name in the filename parameter, this can also come from user input. Any path information in the file name you pass will not be used to determine the path of the file, instead it will be flattened into valid, safe components of the file's name.
If there is no path or filename, it will return the root path, correctly formatted for the host file system, with a trailing path separator (/).
If there is a subdirectory path, it will split it into its component parts, sanitising each with the file name sanitiser and rebuilding the path without a leading path separator.
If there is a file name, it will sanitise the name with the sanitiser.
It will os.path.join the path components to get a final path to your file.
As a final double-check that the resulting path is valid and safe, it checks that the resulting path is somewhere under the root path. This check is done properly by splitting up and comparing the component parts of the path, rather than just ensuring one string starts with another.
OK, enough warnings and description, here's the code:
import os
import string
from unicodedata import normalize
def ensure_directory_exists(path_directory):
    """Create *path_directory* (and any missing parents) if it does not exist.

    Uses exist_ok to avoid the check-then-create race of the original:
    another process could create the directory between os.path.exists()
    and os.makedirs(). NOTE(review): unlike the original, this raises
    FileExistsError if the path exists but is a regular file — arguably
    the safer behaviour.
    """
    os.makedirs(path_directory, exist_ok=True)
def os_path_separators():
    """Return this platform's path separators: the primary os.path.sep plus
    the alternate one (os.path.altsep) when the platform defines it."""
    return [sep for sep in (os.path.sep, os.path.altsep) if sep]
def sanitise_filesystem_name(potential_file_path_name):
    """Return a filesystem-safe ASCII version of one file/path component.

    Steps: fold unicode to ASCII, replace path separators with underscores,
    keep only whitelisted characters, and reject names with no letter or
    digit (such as '..') by substituting "(Empty Name)".

    Requires `from unicodedata import normalize` and `import string` at
    module level.
    """
    # Sort out unicode characters: fold to the closest ASCII representation.
    valid_filename = normalize('NFKD', potential_file_path_name).encode('ascii', 'ignore').decode('ascii')
    # Replace path separators with underscores so the result is one component.
    for sep in os_path_separators():
        valid_filename = valid_filename.replace(sep, '_')
    # Whitelist of characters allowed to remain in the name. (The original
    # reused one `valid_chars` variable for two different whitelists.)
    allowed_chars = "-_.() {0}{1}".format(string.ascii_letters, string.digits)
    valid_filename = "".join(ch for ch in valid_filename if ch in allowed_chars)
    # Require at least one letter or digit, to reject names such as '..'.
    significant_chars = "{0}{1}".format(string.ascii_letters, string.digits)
    test_filename = "".join(ch for ch in potential_file_path_name if ch in significant_chars)
    if len(test_filename) == 0:
        # Replace empty file name or file path part with the following
        valid_filename = "(Empty Name)"
    return valid_filename
def get_root_path():
    """Return the configured root directory as an absolute path with a
    trailing separator, creating the directory if necessary.

    NOTE(review): get_file_root_from_config() must be supplied by the
    application; its value must come from configuration, never from user
    input.
    """
    # Replace with your own root file path, e.g. '/place/to/save/files/'
    filepath = get_file_root_from_config()
    filepath = os.path.abspath(filepath)
    # ensure trailing path separator (/)
    if not any(filepath[-1] == sep for sep in os_path_separators()):
        filepath = '{0}{1}'.format(filepath, os.path.sep)
    ensure_directory_exists(filepath)
    return filepath
def path_split_into_list(path):
    """Return all components of *path* as a list, root-first, with bare
    separators dropped (e.g. '/a/b/c' -> ['a', 'b', 'c'])."""
    # Gets all parts of the path as a list, excluding path separators
    parts = []
    while True:
        newpath, tail = os.path.split(path)
        # os.path.split reaches a fixed point at the root ('/' or '').
        if newpath == path:
            assert not tail
            # Keep the remaining head only when it is a real name, not a
            # bare separator or the empty string.
            if path and path not in os_path_separators():
                parts.append(path)
            break
        if tail and tail not in os_path_separators():
            parts.append(tail)
        path = newpath
    # Components were collected leaf-first; restore root-first order.
    parts.reverse()
    return parts
def sanitise_filesystem_path(potential_file_path):
    """Split a path into components, sanitise each one separately, and
    rebuild it (every component gets a trailing os.path.sep; no leading
    separator, so the result is always relative)."""
    components = path_split_into_list(potential_file_path)
    return ''.join(
        '{0}{1}'.format(sanitise_filesystem_name(component), os.path.sep)
        for component in components
    )
def check_if_path_is_under(parent_path, child_path):
    """Return True when child_path lies at or below parent_path, compared
    component-by-component rather than by string prefix."""
    child_parts = path_split_into_list(child_path)
    parent_parts = path_split_into_list(parent_path)
    if len(child_parts) < len(parent_parts):
        return False
    # Every leading component of the child must equal the parent's.
    return child_parts[:len(parent_parts)] == parent_parts
def make_valid_file_path(path=None, filename=None):
    """Build a sanitised absolute file path under the configured root.

    Arguments:
        path - optional subdirectory path (may come from user input);
               each component is sanitised individually.
        filename - optional file name (may come from user input); any path
               information inside it is flattened into the name.

    Returns the absolute path, or None if the result would escape the root.
    """
    root_path = get_root_path()
    if path:
        sanitised_path = sanitise_filesystem_path(path)
        if filename:
            sanitised_filename = sanitise_filesystem_name(filename)
            complete_path = os.path.join(root_path, sanitised_path, sanitised_filename)
        else:
            complete_path = os.path.join(root_path, sanitised_path)
    else:
        if filename:
            sanitised_filename = sanitise_filesystem_name(filename)
            complete_path = os.path.join(root_path, sanitised_filename)
        else:
            # BUG FIX: the original read `complete_path = complete_path`,
            # which raises NameError here; with neither a path nor a
            # filename the correct result is the root itself.
            complete_path = root_path
    complete_path = os.path.abspath(complete_path)
    # Final safety net: the resolved path must still be under the root.
    if check_if_path_is_under(root_path, complete_path):
        return complete_path
    else:
        return None
This will prevent the user inputting filenames like ../../../../etc/shadow but will also not allow files in subdirs below basedir (i.e. basedir/subdir/moredir is blocked):
from pathlib import Path
test_path = (Path(basedir) / user_input).resolve()
if test_path.parent != Path(basedir).resolve():
raise Exception(f"Filename {test_path} is not in {Path(basedir)} directory")
If you want to allow subdirs below basedir:
if not Path(basedir).resolve() in test_path.resolve().parents:
raise Exception(f"Filename {test_path} is not in {Path(basedir)} directory")
I ended up here looking for a quick way to handle my use case and ultimately wrote my own. What I needed was a way to take in a path and force it to be in the CWD. This is for a CI system working on mounted files.
def relative_path(the_path: str) -> str:
    '''
    Force the given path to be relative to the CI workspace.
    Sandboxes the path so that you can't escape out of CWD.
    '''
    # Resolve to an absolute path first ('.'-relative inputs become /${PWD}/...).
    absolute = os.path.abspath(the_path)
    # Paths inside the workspace are rewritten relative to it. NOTE: this is
    # a plain string-prefix test against os.getcwd().
    if absolute.startswith(os.getcwd()):
        absolute = os.sep + os.path.relpath(absolute)
    # Prepend '.' so the result reads ./the/path.
    return '.' + absolute
In my case I didn't want to raise an exception. I just want to force that any path given will become an absolute path in the CWD.
Tests:
def test_relative_path():
    """Smoke tests for relative_path().

    NOTE(review): the '../'-based asserts only hold when the '..' sequences
    climb all the way to the filesystem root from the current working
    directory (e.g. a shallow cwd such as '/workspace'); from a deeper cwd,
    os.path.abspath('../test') keeps intermediate components and the result
    is not './test'.
    """
    assert relative_path('../test') == './test'
    assert relative_path('../../test') == './test'
    assert relative_path('../../abc/../test') == './test'
    assert relative_path('../../abc/../test/fixtures') == './test/fixtures'
    assert relative_path('../../abc/../.test/fixtures') == './.test/fixtures'
    assert relative_path('/test/foo') == './test/foo'
    assert relative_path('./test/bar') == './test/bar'
    assert relative_path('.test/baz') == './.test/baz'
    assert relative_path('qux') == './qux'
This is an improvement on #mneil's solution, using relpath's secret second argument:
import os.path
def sanitize_path(path):
    """
    Sanitize a path against directory traversals

    >>> sanitize_path('../test')
    'test'
    >>> sanitize_path('../../test')
    'test'
    >>> sanitize_path('../../abc/../test')
    'test'
    >>> sanitize_path('../../abc/../test/fixtures')
    'test/fixtures'
    >>> sanitize_path('../../abc/../.test/fixtures')
    '.test/fixtures'
    >>> sanitize_path('/test/foo')
    'test/foo'
    >>> sanitize_path('./test/bar')
    'test/bar'
    >>> sanitize_path('.test/baz')
    '.test/baz'
    >>> sanitize_path('qux')
    'qux'
    """
    # Pretend to chroot: anchor the path at the filesystem root, so leading
    # '..' components have nothing left to climb out of.
    rooted = os.path.join("/", path)
    # Collapse every redundant segment ('/..' normalises to '/').
    collapsed = os.path.normpath(rooted)
    # Strip the artificial root again, yielding a safe relative path.
    return os.path.relpath(collapsed, "/")
if __name__ == '__main__':
import doctest
doctest.testmod()
To be very specific to the question asked, but raising an exception rather than converting the path to relative (note: this simple substring check misses a bare '..', segments like 'dir/..', and Windows-style '..\' separators — prefer the normpath/relpath approach above for robust sanitising):
path = 'your/path/../../to/reach/root'
if '../' in path or path[:1] == '/':
raise Exception

Extract files from zip without keep the top-level folder with python zipfile

I'm using the current code to extract the files from a zip file while keeping the directory structure:
zip_file = zipfile.ZipFile('archive.zip', 'r')
zip_file.extractall('/dir/to/extract/files/')
zip_file.close()
Here is a structure for an example zip file:
/dir1/file.jpg
/dir1/file1.jpg
/dir1/file2.jpg
At the end I want this:
/dir/to/extract/file.jpg
/dir/to/extract/file1.jpg
/dir/to/extract/file2.jpg
But it should ignore only if the zip file has a top-level folder with all files inside it, so when I extract a zip with this structure:
/dir1/file.jpg
/dir1/file1.jpg
/dir1/file2.jpg
/dir2/file.txt
/file.mp3
It should stay like this:
/dir/to/extract/dir1/file.jpg
/dir/to/extract/dir1/file1.jpg
/dir/to/extract/dir1/file2.jpg
/dir/to/extract/dir2/file.txt
/dir/to/extract/file.mp3
Any ideas?
If I understand your question correctly, you want to strip any common prefix directories from the items in the zip before extracting them.
If so, then the following script should do what you want:
import sys, os
from zipfile import ZipFile
def get_members(zip):
    """Yield the archive's ZipInfo entries with any directory prefix common
    to every file stripped from their names (for use as the `members`
    argument of ZipFile.extractall)."""
    # Path components (minus the file name) of every file entry; directory
    # entries (names ending in '/') are ignored when computing the prefix.
    file_parts = [name.split('/')[:-1]
                  for name in zip.namelist()
                  if not name.endswith('/')]
    # Element-wise common prefix across all the component lists.
    prefix = os.path.commonprefix(file_parts)
    if prefix:
        # Re-join the components into a 'dir/sub/' style prefix string.
        prefix = '/'.join(prefix) + '/'
    offset = len(prefix)
    for info in zip.infolist():
        # Skip entries that are nothing but the prefix itself.
        if len(info.filename) > offset:
            info.filename = info.filename[offset:]
            yield info
# Tiny CLI: argv[1] is the zip to extract, optional argv[2] the destination
# (defaults to the current directory).
args = sys.argv[1:]
if len(args):
    # NOTE(review): `zip` shadows the builtin of the same name.
    zip = ZipFile(args[0])
    path = args[1] if len(args) > 1 else '.'
    # extractall() accepts an iterable of (possibly renamed) ZipInfo members.
    zip.extractall(path, get_members(zip))
Read the entries returned by ZipFile.namelist() to see if they're in the same directory, and then open/read each entry and write it to a file opened with open().
This might be a problem with the zip archive itself. In a python prompt try this to see if the files are in the correct directories in the zip file itself.
import zipfile
# Open the archive and look at its first entry to check whether the zip
# actually stores directory structure.
zf = zipfile.ZipFile("my_file.zip", 'r')
first_file = zf.filelist[0]
# BUG FIX: the original printed `file_list.filename` — a NameError, since the
# entry was bound to `first_file`. Parenthesised print works on Python 2 and 3.
print(first_file.filename)
This should say something like "dir1"
repeat the steps above substituting an index of 1 into filelist, like so: first_file = zf.filelist[1]. This time the output should look like 'dir1/file1.jpg'; if this is not the case, then the zip file does not contain directories and will be unzipped entirely into one single directory.
Based on @ekhumoro's answer, I came up with a simpler function to extract everything at the same level; it is not exactly what you are asking, but I think it can help someone.
def _basename_members(self, zip_file: ZipFile):
for zipinfo in zip_file.infolist():
zipinfo.filename = os.path.basename(zipinfo.filename)
yield zipinfo
from_zip="some.zip"
to_folder="some_destination/"
with ZipFile(file=from_zip, mode="r") as zip_file:
os.makedirs(to_folder, exist_ok=True)
zip_infos = self._basename_members(zip_file)
zip_file.extractall(path=to_folder, members=zip_infos)
Basically you need to do two things:
Identify the root directory in the zip.
Remove the root directory from the paths of other items in the zip.
The following should retain the overall structure of the zip while removing the root directory:
import typing, zipfile
def _is_root(info: zipfile.ZipInfo) -> bool:
if info.is_dir():
parts = info.filename.split("/")
# Handle directory names with and without trailing slashes.
if len(parts) == 1 or (len(parts) == 2 and parts[1] == ""):
return True
return False
def _members_without_root(archive: zipfile.ZipFile, root_filename: str) -> typing.Generator:
for info in archive.infolist():
parts = info.filename.split(root_filename)
if len(parts) > 1 and parts[1]:
# We join using the root filename, because there might be a subdirectory with the same name.
info.filename = root_filename.join(parts[1:])
yield info
with zipfile.ZipFile("archive.zip", mode="r") as archive:
    # Use the first directory entry with no more than one path segment as root.
    # BUG FIX: next() is given a default of None — the original bare next()
    # would raise StopIteration when no root directory exists, so the else
    # branch that prints the message could never run.
    root = next((info for info in archive.infolist() if _is_root(info)), None)
    if root:
        archive.extractall(path="/dir/to/extract/", members=_members_without_root(archive, root.filename))
    else:
        print("No root directory found in zip.")

how to use exclude option with pep8.py

I have a directory structure like this
/path/to/dir/a/foo
/path/to/dir/b/foo
and want to run pep8 on the directory /path/to/dir/ excluding /path/to/dir/a/foo
pep8 --exclude='/path/to/dir/a/foo' /path/to/dir
and the expected output of pep8 is, it should not include the files from /a/foo/
but pep8 is checking the files inside the /a/foo/ also
when I do this
pep8 --exclude='foo' /path/to/dir
it is excluding the files from both and a/foo /b/foo/
what is the pattern to be given to pep8 exclude option so that it exclude the files only from /a/foo/ but not from b/foo/ ?
You can try something like this:
pep8 --exclude='*/a/foo*' /path/to/dir
The exclude portion uses fnmatch to match against the path as seen in the source code.
def excluded(filename):
    """
    Check if options.exclude contains a pattern that matches filename.
    """
    # Only the basename of the path is compared against each pattern, never
    # the directory part.
    basename = os.path.basename(filename)
    # NOTE(review): `options` is a module-level global of pep8.py, and
    # `fnmatch` here is the function (from fnmatch import fnmatch).
    for pattern in options.exclude:
        if fnmatch(basename, pattern):
            # print basename, 'excluded because it matches', pattern
            return True
    # Falls through, implicitly returning None (falsy) when nothing matches.
I'm sure I'm reinventing the wheel here, but I have also been unable to get the API working:
import os
import re
from pep8 import StyleGuide
def get_pyfiles(directory=None, exclusions=None, ftype='.py'):
    '''generator of all ftype files in all subdirectories.
    if directory is None, will look in current directory.
    exclusions should be a regular expression (matched against the full
    path; matching files are dropped).
    '''
    target = os.getcwd() if directory is None else directory
    found = (os.path.join(dirpath, name)
             for dirpath, _subdirs, names in os.walk(target)
             for name in names
             if name.endswith(ftype))
    if exclusions is None:
        return found
    # Compile eagerly (at call time) so an invalid regex fails immediately,
    # matching the original's behaviour.
    skip = re.compile(exclusions)
    return (path for path in found if skip.match(path) is None)
def get_pep8_counter(directory=None, exclusions=None):
    """Run a pep8 StyleGuide over every matching file under *directory*
    (default: current directory) and return the check counters."""
    target = os.getcwd() if directory is None else directory
    paths = list(get_pyfiles(directory=target, exclusions=exclusions))
    # Only the counters are of interest here (other report data is discarded).
    return StyleGuide(paths=paths).check_files().counters
counter = get_pep8_counter(exclusions='.*src.*|.*doc.*')

Categories

Resources