open files, keeping folder structure as nested list - python

I have a folder with the following structure:
data
|-folder1
|--subfolder1
|---file1
|---file2
|--subfolder2
|---file1
|---file2
|-folder2
|--subfolder1
|---file1
|---file2
|--subfolder2
|---file1
|---file2
with many folders, subfolders and files.
How can I create a list that is subdivided into smaller lists that contain my data?
For example, I'd end up with a list called data and could retrieve file1 from folder1/subfolder1 by indexing data[0][0][0].
As of now, I have created an empty list for each file type, but I'm not sure how to append to a list of lists.
I have:
file1 = []
file2 = []
for folder in sorted(os.listdir(path)):
    if folder != 'Documentation.txt':
        for subfolder in sorted(os.listdir(path + '/' + folder)):
            if subfolder != '.DS_Store':
                for file in sorted(os.listdir(path + '/' + folder + '/' + subfolder)):
                    if file.endswith(".x.dat"):
                        file1.append(pd.read_csv((path + '/' + folder + '/' + subfolder + '/' + file), header=None, sep=' '))
                    if file.endswith(".y.dat"):
                        file2.append(pd.read_csv((path + '/' + folder + '/' + subfolder + '/' + file), header=None, sep=' '))
data = [file1, file2]
This returns all the data files, but I'm struggling to figure out how to nest each file in a list of lists according to the original folder structure... I feel like the solution is pretty trivial, I'm just not great with Python. Thanks!

You could try the following with pathlib's Path.rglob() and groupby from itertools (all standard library):
from pathlib import Path
from itertools import groupby
from functools import partial

def key(i, file): return file.parent.parts[i]

base = Path("data")
data = []
for _, group1 in groupby(base.rglob("*.dat"), key=partial(key, 1)):
    data.append([])
    for _, group2 in groupby(group1, key=partial(key, 2)):
        data[-1].append([file.name for file in group2])
With a test structure created by
base = Path("data")
for i in range(1, 4):
    for j in range(1, 3):
        path = (base / f"folder{i}") / f"subfolder{j}"
        path.mkdir(parents=True, exist_ok=True)
        for k in range(1, 3):
            with open(path / f"file{i}-{j}-{k}.dat", "w") as file:
                file.write("A,B,C\n1,2,3\n4,5,6")
this delivers the following data:
[[['file1-1-1.dat', 'file1-1-2.dat'], ['file1-2-1.dat', 'file1-2-2.dat']],
[['file2-1-1.dat', 'file2-1-2.dat'], ['file2-2-1.dat', 'file2-2-2.dat']],
[['file3-1-1.dat', 'file3-1-2.dat'], ['file3-2-1.dat', 'file3-2-2.dat']]]
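With this structure, data[0][0][0] gives 'file1-1-1.dat', i.e. exactly the indexing you asked for.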
Your code implies that you actually don't want to collect the filenames but to pd.read_csv() them and store the dataframes in data. To do that, you have to replace
data[-1].append([file.name for file in group2])
with
data[-1].append([pd.read_csv(file) for file in group2])
And it might well be that you have to add more logic to the file selection: I just went with the .dat suffix.
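For instance, a minimal sketch of that extra selection logic, mirroring the question's split into .x.dat and .y.dat files (the pandas arguments are copied from the question; everything else is an assumption about your layout):

from pathlib import Path
from itertools import groupby
from functools import partial
import pandas as pd

def key(i, file): return file.parent.parts[i]

base = Path("data")
data_x = []
# Sort first so groupby sees all files of one folder/subfolder consecutively
for _, group1 in groupby(sorted(base.rglob("*.x.dat")), key=partial(key, 1)):
    data_x.append([])
    for _, group2 in groupby(group1, key=partial(key, 2)):
        data_x[-1].append([pd.read_csv(f, header=None, sep=' ') for f in group2])
# data_y would be built the same way with "*.y.dat"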
You could do something similar with os.walk instead, as suggested in the other answer:
from pathlib import Path
from os import walk

base = "data"
data = []
for root, _, files in walk(base):
    if ".DS_Store" in root:
        continue
    num_parts = len(Path(root).parts)
    if num_parts == 2:
        data.append([])
    elif num_parts == 3:
        data[-1].append([file for file in files if file.endswith(".dat")])
or, respectively, for the dataframes:
        data[-1].append([pd.read_csv(Path(root) / file) for file in files])

It's not clear to me exactly what output you want, but I'm pretty sure os.walk is the best option for generating a tree of your files:
>>> import os
>>> import re
>>> data_path = '/Users/nilton/data'
>>> files_paths = []
>>> for dirpath, dirnames, filenames in os.walk(data_path):
...     for filename in filenames:
...         if re.search(r'\.dat$', filename, re.I):
...             files_paths.append(os.path.join(dirpath, filename))
...
>>> files_paths
['/Users/nilton/data/folder2/subfolder2/file2.dat',
 '/Users/nilton/data/folder2/subfolder2/file1.dat',
 ...]
Knowing this and reading the os.walk documentation, you can manage to get your desired output by playing with the 3-tuple (dirpath, dirnames, filenames) output from os.walk.
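As a hedged sketch of that idea (assuming the two-level data/folder/subfolder layout from the question; the names are illustrative):

import os

data_path = '/Users/nilton/data'
tree = {}
for dirpath, dirnames, filenames in os.walk(data_path):
    parts = os.path.relpath(dirpath, data_path).split(os.sep)
    if len(parts) == 2:  # we are inside folderX/subfolderY
        folder, subfolder = parts
        tree.setdefault(folder, {})[subfolder] = sorted(
            f for f in filenames if f.endswith('.dat'))
# Turn the dict of dicts into the nested list from the question,
# so data[0][0][0] is the first file of the first subfolder of the first folder
data = [[tree[f][s] for s in sorted(tree[f])] for f in sorted(tree)]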

Related

How to delete all files in folder except CSV?

I wrote a dataframe to a CSV in PySpark, and I got the output files in the directory as:
._SUCCESS.crc
.part-00000-6cbfdcfd-afff-4ded-802c-6ccd67f3804a-c000.csv.crc
part-00000-6cbfdcfd-afff-4ded-802c-6ccd67f3804a-c000.csv
How do I keep only the CSV file in the directory and delete rest of the files, using Python?
import os

directory = "/path/to/directory/with/files"
files_in_directory = os.listdir(directory)
filtered_files = [file for file in files_in_directory if not file.endswith(".csv")]
for file in filtered_files:
    path_to_file = os.path.join(directory, file)
    os.remove(path_to_file)
First, you list all files in the directory. Then you keep only those that don't end with .csv. Finally, you remove every file left in that filtered list.
Try iterating over the files in the directory, and then os.remove only those files that do not end with .csv.
import os

dir_path = "path/to/the/directory/containing/files"
dir_list = os.listdir(dir_path)
for item in dir_list:
    if not item.endswith(".csv"):
        os.remove(os.path.join(dir_path, item))
You can also have fun with list comprehension for doing this:
import os

dir_path = 'output/'
[os.remove(os.path.join(dir_path, item)) for item in os.listdir(dir_path) if not item.endswith('.csv')]
I would recommend using pathlib (Python >= 3.4) and the built-in type set() to subtract all csv filenames from the list of all files. I would argue this is easy to read, fast to process and a good pythonic solution.
>>> from pathlib import Path
>>> p = Path('/path/to/directory/with/files')
>>> # Get all file names
>>> # https://stackoverflow.com/a/65025567/4865723
>>> set_all_files = set(filter(Path.is_file, p.glob('**/*')))
>>> # Get all csv filenames (BUT ONLY with lower case suffix!)
>>> set_csv_files = set(filter(Path.is_file, p.glob('**/*.csv')))
>>> # Create a file list without csv files
>>> set_files_to_delete = set_all_files - set_csv_files
>>> # Iterate over that set and delete the files
>>> for file_name in set_files_to_delete:
...     Path(file_name).unlink()
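As the comment above warns, p.glob('**/*.csv') only matches the lower-case suffix, so an upper-case .CSV file would end up in the delete set. If that matters, a hedged variant (continuing the same session) filters on the suffix itself:
>>> set_csv_files = {f for f in set_all_files if f.suffix.lower() == '.csv'}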
import os

for (root, dirs, files) in os.walk('Test', topdown=True):
    for name in files:
        fp = os.path.join(root, name)
        if name.endswith(".csv"):
            pass
        else:
            os.remove(fp)
What's the advantage of os.walk? It recursively descends into every subdirectory of the directory you give it, so nested folders are cleaned as well.

How can I confirm and remove the original files after sorting and copying them into several folders?

I'm a newbie and I'm trying to make office work a little less tedious. I currently have a little program that sorts and copies .pdf files from a folder into several folders, based on who these files need to be sent to later.
It works great. There's just the issue that I keep double-checking if it did its job. So then I added a bit where it counts the copied files to make checking easier.
Now I've been trying to figure out if I could make the program compare the list of files in the original folder with a list of files from all the other destination folders and then delete the originals if the files are indeed copied.
I've also resorted to having the program print the resulting file paths, but it's ugly and still requires me to manually compare.
Here's my code:
import os
import shutil
import pathlib
import pprint

dir = ('[path to original folder]')
files = os.listdir(dir)
user_data = [
    ('Karl H. Preusse', [Path to Karl]),
    ('Rom', [Path to Rom]),
    ('Hochschule', [Path to Hochschule]),
    ('Kiefer', [Path to Kiefer]),
    ('Penny', [Path to Penny]),
    ('Steigenberger', [Path to Steigenberger]),
    ('Penzkofer', [Path to Penzkofer]),
    ('Stoffel', [Path to Stoffel]),
    ('Cavertitzer', [Path to Cavertitzer])
]
for pattern, dest_dir in user_data:
    matching_files = [f for f in files if pattern in f]
    for filename in matching_files:
        full_filename = os.path.join(dir, filename)
        if os.path.isfile(full_filename):
            if not os.path.exists(dest_dir):
                os.makedirs(dest_dir)
            shutil.copy(full_filename, dest_dir)
            pprint.pprint(shutil.copy(full_filename, dest_dir))

stetje_datotek = sum(len(files) for _, _, files in os.walk([Path to directory that holds the copy folders]))  # defines files to count
print('Stevilo datotek v mapi Posiljanje je: {}'.format(stetje_datotek))  # Prints out how many files are in the target folders.
Below are my attempts at getting things automated.
#I commented this function out as I couldn't figure out how to get the data out of it.
#def sub_files(folder):
#    relpath = os.path.relpath
#    join = os.path.join
#    for path, _, files in os.walk([Path to directory that holds the copy folders]):
#        relative = relpath(path, [Path to directory that holds the copy folders])
#        for file in files:
#            yield join(relative, file)
#print(sub_files)
Here I thought to use inputs to individually check each folder:
#print(os.listdir([Path to directory that holds the copy folders]))
#if input() == 'Penzkofer':
#    pprint.pprint(os.listdir([Path to Penzkofer folder]))
And here I tried to compare lists, but I get a TypeError: unhashable type: 'list' error
prvotne_datoteke = set(os.listdir(dir))
kopirane_datoteke = set(os.walk([Path to directory that holds the copy folders]))
set(prvotne_datoteke).intersection(kopirane_datoteke)
Any help is appreciated. Thank you.
One approach is to print the names of each copied file recipient and the number of recipients, then delete the original file if all intended recipients are included.
to_be_copied = set()  # holds original paths of all files being copied
for pattern, dest_dir in user_data:
    matching_files = [f for f in files if pattern in f]
    for filename in matching_files:
        full_filename = os.path.join(dir, filename)
        to_be_copied.add(filename)  # adds filepaths
        if os.path.isfile(full_filename):
            if not os.path.exists(dest_dir):
                os.makedirs(dest_dir)
            shutil.copy(full_filename, dest_dir)
            pprint.pprint(shutil.copy(full_filename, dest_dir))

# Iterates through copied files
for original_file in to_be_copied:
    count = 0
    recipients = []
    # Iterates through potential recipients
    for pattern, dest_dir in user_data:
        complete_name = os.path.join(dest_dir, original_file)
        if os.path.isfile(complete_name):
            count += 1
            recipients.append(pattern)
    print(original_file + ' sent to ' + str(count) + ' people:')
    print(recipients)
    # Quick manual check, could be changed to checking if count/recipients is correct
    print('Delete original file? (Y or N): ')
    delete = input()
    if delete == 'Y':
        os.remove(os.path.join(dir, original_file))
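If you'd rather avoid the manual Y/N prompt entirely, here is a hedged sketch of the set comparison you attempted, fixing the TypeError: os.walk() yields (dirpath, dirnames, filenames) tuples, so you have to collect the filenames rather than the tuples. Note it only confirms that a file reached at least one destination, which is weaker than the recipient count above; the folder paths are placeholders and dir comes from your code.

import os

copy_root = [Path to directory that holds the copy folders]
copied = set()
for _, _, filenames in os.walk(copy_root):
    copied.update(filenames)  # collect plain filenames from every destination folder
# Every original that also appears in some destination folder
for filename in set(os.listdir(dir)) & copied:
    os.remove(os.path.join(dir, filename))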

Get folder structure along with folder/file sizes in python

After doing my research for this specific task, I found that most of the solutions given for this kind of problem either return the list of all the files or the TOTAL size of the folder/file.
What I am trying to achieve is an output CSV stating the folder structure, i.e. folders - subfolders - files (optional), along with the size information for EACH.
There is no specific format for the CSV. I just need to know the tree structure with the size of the folder/sub-folder.
The reason behind this is that we are moving from physical servers to the cloud. In order to verify whether all the data was retained correctly during conversion I need to make a similar list of all SHARED DRIVES which can later be validated.
Looking forward to meaningful insights. Thanks!
Edit:
Sooo, that should be what you are asking for:
import os
import csv

def sizeof_fmt(num, suffix='B'):
    for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
        if abs(num) < 1024.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f%s%s" % (num, 'Yi', suffix)

def get_size(start_path='.'):
    total_size = 0
    for dirpath, dirnames, filenames in os.walk(start_path):
        for f in filenames:
            fp = os.path.join(dirpath, f)
            total_size += os.path.getsize(fp)
    return sizeof_fmt(total_size)

with open("yourfilename.csv", mode="w") as dir_file:
    csv_writer = csv.writer(dir_file, delimiter=",")

    def files_and_sizes(start_path):
        dir_list = [file for file in os.listdir(start_path)]
        for file in dir_list:
            path = start_path + "\\" + file
            if os.path.isdir(path) is True:
                csv_writer.writerow([file, get_size(path)])
                files_and_sizes(start_path + "\\" + file)

    files_and_sizes(r"C:\your\path\here")
Updated to better fit the question.
You can get all files with sizes like this:
import os

all_files_with_size = []

def files_and_sizes(start_path):
    dir_list = [file for file in os.listdir(start_path)]
    current_dir = []
    for file in dir_list:
        path = start_path + "\\" + file
        if os.path.isdir(path) is True:
            current_dir.append(files_and_sizes(path))
        else:
            current_dir.append((file, os.lstat(path).st_size))
    return current_dir
It will return a list containing all files like (file, size) and a sublist for each directory.
I recommend appending the entries to a file, but the formatting is up to you.
Also, if you want the directory sizes as well (note that os.lstat() on a directory reports the size of the directory entry itself, not the recursive total of its contents):
if os.path.isdir(path) is True:
    current_dir.append((file, os.lstat(path).st_size))
    current_dir.append(files_and_sizes(path))
I believe you will have to use a combination of the solutions that you have already found, such as os.listdir(path) to get the contents of a directory, os.lstat(path).st_size to get a file's size, and os.path.isdir(path) / os.path.isfile(path) to determine the type.
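A hedged sketch of such a combination, printing an indented tree with a size per entry (the path is a placeholder; for directories it sums the contained file sizes rather than using os.lstat(), which would only report the size of the directory entry itself):

import os

def print_tree(path, indent=0):
    # One line per entry: files with their size, directories with a recursive total
    for name in sorted(os.listdir(path)):
        full = os.path.join(path, name)
        if os.path.isdir(full):
            total = sum(os.lstat(os.path.join(dp, f)).st_size
                        for dp, _, fns in os.walk(full) for f in fns)
            print('    ' * indent + name + '/ ' + str(total))
            print_tree(full, indent + 1)
        elif os.path.isfile(full):
            print('    ' * indent + name + ' ' + str(os.lstat(full).st_size))

print_tree(r'C:\your\path\here')  # placeholder path, as in the answer above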

Concatenating files with matching string in middle of filename

My goal is to concatenate files in a folder based on a string in the middle of the filename, ideally using python or bash. To simplify the question, here is an example:
P16C-X128-22MB-LL_merged_trimmed.fastq
P16C-X128-27MB-LR_merged_trimmed.fastq
P16C-X1324-14DL-UL_merged_trimmed.fastq
P16C-X1324-21DL-LL_merged_trimmed.fastq
I would like to concatenate based on the value after the first dash but before the second (e.g. X128 or X1324), so that I am left with (in this example), two additional files that contain the concatenated contents of the individual files:
P16C-X128-Concat.fastq (concat of 2 files with X128)
P16C-X1324-Concat.fastq (concat of 2 files with X1324)
Any help would be appreciated.
For simple string manipulations, I prefer to avoid the use of regular expressions. I think that str.split() is enough in this case. Besides, for simple file name matching, the library fnmatch provides enough functionality.
import fnmatch
import os
from itertools import groupby

path = '/full/path/to/files/'
ext = ".fastq"
files = fnmatch.filter(os.listdir(path), '*' + ext)

def by(fname): return fname.split('-')[1]  # E.g. X128

# You said:
# I would like to concatenate based on the value after the first dash
# but before the second (e.g. X128 or X1324)
# If you want to keep both parts together, uncomment the following:
# def by(fname): return '-'.join(fname.split('-')[:2])  # E.g. P16C-X128

for k, g in groupby(sorted(files, key=by), key=by):
    dst = str(k) + '-Concat' + ext
    with open(os.path.join(path, dst), 'w') as dstf:
        for fname in g:
            with open(os.path.join(path, fname), 'r') as srcf:
                dstf.write(srcf.read())
Instead of the read/write in Python, you could also delegate the concatenation to the OS. You would normally use a bash command like this:
cat *-X128-*.fastq > X128.fastq
Using the subprocess library:
import subprocess
for k, g in groupby(sorted(files, key=by), key=by):
dst = str(k) + '-Concat' + ext
with open(os.path.join(path, dst), 'w') as dstf:
command = ['cat'] # +++
for fname in g:
command.append(os.path.join(path, fname)) # +++
subprocess.run(command, stdout=dstf) # +++
Also, for a batch job like this one, you should consider placing the concatenated files in a separate directory, but that is easily done by changing the dst filename.
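For example (a hedged sketch; the concat subdirectory name is an assumption):

out_dir = os.path.join(path, 'concat')  # hypothetical output directory
os.makedirs(out_dir, exist_ok=True)
# ...then, inside the loop above, open the destination there instead:
# with open(os.path.join(out_dir, dst), 'w') as dstf: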
You can use open to read and write (create) files, os.listdir to get all files (and directories) in a certain directory, and re to match file names as needed.
Use a dictionary to store contents by filename prefix (the file's name up until the 2nd hyphen -) and concatenate the contents together.
import os
import re

contents = {}
file_extension = "fastq"
# Get all files and directories that are in current working directory
for file_name in os.listdir('./'):
    # Use '.' so it doesn't match directories
    if file_name.endswith('.' + file_extension):
        # Match the first 2 hyphen-separated values from file name
        prefix_match = re.match(r"^([^-]+-[^-]+)", file_name)
        file_prefix = prefix_match.group(1)
        # Read the file and concatenate contents with previous contents
        contents[file_prefix] = contents.get(file_prefix, '')
        with open(file_name, 'r') as the_file:
            contents[file_prefix] += the_file.read() + '\n'

# Create new file for each file id and write contents to it
for file_prefix in contents:
    file_contents = contents[file_prefix]
    with open(file_prefix + '-Concat.' + file_extension, 'w') as the_file:
        the_file.write(file_contents)

Iterate over 2 files in each folder and compare them

I compare two text files and print out the results to a 3rd file. I am trying to make it so the script I'm running would iterate over all of the folders that have two text files in them, in the CWD of the script.
What I have so far:
import os
import glob

path = './'
for infile in glob.glob(os.path.join(path, '*.*')):
    print('current file is: ' + infile)
    with open(f1 + '.txt', 'r') as fin1, open(f2 + '.txt', 'r') as fin2:
Would this be a good way to start the iteration process?
It's not the clearest code, but it gets the job done. However, I'm pretty sure I need to take the logic out of the read/write methods, and I'm not sure where to start.
What I'm basically trying to do is have a script iterate over all of the folders in its CWD, open each folder, compare the two text files inside, write a 3rd text file to the same folder, then move on to the next.
Another method i have tried is as follows:
import os

rootDir = 'C:\\Python27\\test'
for dirName, subdirList, fileList in os.walk(rootDir):
    print('Found directory: %s' % dirName)
    for fname in fileList:
        print('\t%s' % fname)
And this outputs the following (to give you a better example of the file structure):
Found directory: C:\Python27\test
    test.py
Found directory: C:\Python27\test\asdd
    asd1.txt
    asd2.txt
Found directory: C:\Python27\test\chro
    ch1.txt
    ch2.txt
Found directory: C:\Python27\test\hway
    hw1.txt
    hw2.txt
Would it be wise to put the compare logic under the for fname in fileList? How do I make sure it compares the two text files inside the specific folder and not with other fnames in the fileList?
This is the full code that I am trying to add this functionality into. I apologize for the Frankenstein nature of it; I am still working on a refined version, but it does not work yet.
from collections import defaultdict
from operator import itemgetter
from itertools import groupby
from collections import deque
import os

class avs_auto:

    def load_and_compare(self, input_file1, input_file2, output_file1, output_file2, result_file):
        self.load(input_file1, input_file2, output_file1, output_file2)
        self.compare(output_file1, output_file2)
        self.final(result_file)

    def load(self, fileIn1, fileIn2, fileOut1, fileOut2):
        with open(fileIn1+'.txt') as fin1, open(fileIn2+'.txt') as fin2:
            frame_rects = defaultdict(list)
            for row in (map(str, line.split()) for line in fin1):
                id, frame, rect = row[0], row[2], [row[3], row[4], row[5], row[6]]
                frame_rects[frame].append(id)
                frame_rects[frame].append(rect)
            frame_rects2 = defaultdict(list)
            for row in (map(str, line.split()) for line in fin2):
                id, frame, rect = row[0], row[2], [row[3], row[4], row[5], row[6]]
                frame_rects2[frame].append(id)
                frame_rects2[frame].append(rect)
        with open(fileOut1+'.txt', 'w') as fout1, open(fileOut2+'.txt', 'w') as fout2:
            for frame, rects in sorted(frame_rects.iteritems()):
                fout1.write('{{{}:{}}}\n'.format(frame, rects))
            for frame, rects in sorted(frame_rects2.iteritems()):
                fout2.write('{{{}:{}}}\n'.format(frame, rects))

    def compare(self, fileOut1, fileOut2):
        with open(fileOut1+'.txt', 'r') as fin1:
            with open(fileOut2+'.txt', 'r') as fin2:
                lines1 = fin1.readlines()
                lines2 = fin2.readlines()
        diff_lines = [l.strip() for l in lines1 if l not in lines2]
        diffs = defaultdict(list)
        with open(fileOut1+'x'+fileOut2+'.txt', 'w') as result_file:
            for line in diff_lines:
                d = eval(line)
                for k in d:
                    list_ids = d[k]
                    for i in range(0, len(d[k]), 2):
                        diffs[d[k][i]].append(k)
            for id_ in diffs:
                diffs[id_].sort()
                for k, g in groupby(enumerate(diffs[id_]), lambda (i, x): i - x):
                    group = map(itemgetter(1), g)
                    result_file.write('{0} {1} {2}\n'.format(id_, group[0], group[-1]))

    def final(self, result_file):
        with open(result_file+'.txt', 'r') as fin:
            lines = (line.split() for line in fin)
            for k, g in groupby(lines, itemgetter(0)):
                fst = next(g)
                lst = next(iter(deque(g, 1)), fst)
                with open('final/{}.avs'.format(k), 'w') as fout:
                    fout.write('video0=ImageSource("old\%06d.jpeg", {}-3, {}+3, 15)\n'.format(fst[1], lst[2]))
                    fout.write('video1=ImageSource("new\%06d.jpeg", {}-3, {}+3, 15)\n'.format(fst[1], lst[2]))
                    fout.write('video0=BilinearResize(video0,640,480)\n')
                    fout.write('video1=BilinearResize(video1,640,480)\n')
                    fout.write('StackHorizontal(video0,video1)\n')
                    fout.write('Subtitle("ID: {}", font="arial", size=30, align=8)'.format(k))
Using the load_and_compare() function, I define two input text files, two output text files, a file for the comparison results, and a final phase that writes many files for all of the differences.
What I am trying to do is have this whole class run on the current working directory, go through every subfolder, compare the two text files, and write everything into the same folder, specifically the final() results.
You can indeed use os.walk(), since that already separates the directories from the files. You only need the directories it returns, because that's where you're looking for your 2 specific files.
You could also use os.listdir(), but that returns directories as well as files in the same list, so you would have to check for directories yourself.
Either way, once you have the directories, you iterate over them (for subdir in dirnames) and join the various path components you have: the dirpath, the subdir name that you got from iterating over the list, and your filename.
Assuming there are also some directories that don't have the specific 2 files, it's a good idea to wrap the open() calls in a try..except block and thus ignore the directories where one of the files (or both of them) doesn't exist.
Finally, if you used os.walk(), you can easily choose if you only want to go into directories one level deep or walk the whole depth of the tree. In the former case, you just clear the dirnames list by dirnames[:] = []. Note that dirnames = [] wouldn't work, since that would just create a new empty list and put that reference into the variable instead of clearing the old list.
Replace the print("do something ...") with your program logic.
#!/usr/bin/env python
import errno
import os

f1 = "test1"
f2 = "test2"
path = "."
for dirpath, dirnames, _ in os.walk(path):
    for subdir in dirnames:
        filepath1, filepath2 = [os.path.join(dirpath, subdir, f + ".txt") for f in (f1, f2)]
        try:
            with open(filepath1, 'r') as fin1, open(filepath2, 'r') as fin2:
                print("do something with " + str(fin1) + " and " + str(fin2))
        except IOError as e:
            # ignore directories that don't contain the 2 files
            if e.errno != errno.ENOENT:
                # reraise exception if different from "file or directory doesn't exist"
                raise
    # comment the next line out if you want to traverse all subsubdirectories
    dirnames[:] = []
Edit:
Based on your comments, I hope I understand your question better now.
Try the following code snippet instead. The overall structure stays the same, only now I'm using the returned filenames of os.walk(). Unfortunately, that also makes it harder to do something like "go only into the subdirectories 1 level deep", so I hope walking the tree recursively is fine with you. If not, I'll have to add a little code later.
#!/usr/bin/env python
import fnmatch
import os

filter_pattern = "*.txt"
path = "."
for dirpath, dirnames, filenames in os.walk(path):
    # comment this out if you don't want to filter
    filenames = [fn for fn in filenames if fnmatch.fnmatch(fn, filter_pattern)]
    if len(filenames) == 2:
        # comment this out if you don't want the 2 filenames to be sorted
        filenames.sort(key=str.lower)
        filepath1, filepath2 = [os.path.join(dirpath, fn) for fn in filenames]
        with open(filepath1, 'r') as fin1, open(filepath2, 'r') as fin2:
            print("do something with " + str(fin1) + " and " + str(fin2))
I'm still not really sure what your program logic does, so you will have to interface the two yourself.
However, I noticed that you're adding the ".txt" extension to the file name explicitly all over your code, so depending on how you are going to use the snippet, you might or might not need to remove the ".txt" extension first before handing the filenames over. That would be achieved by inserting the following line after or before the sort:
filenames = [os.path.splitext(fn)[0] for fn in filenames]
Also, I still don't understand why you're using eval(). Do the text files contain Python code? In any case, eval() should be avoided and replaced by code that's more specific to the task at hand:
If it's a list of comma separated strings, use line.split(",") instead.
If there might be whitespace before or after the comma, use [word.strip() for word in line.split(",")] instead.
If it's a list of comma separated integers, use [int(num) for num in line.split(",")] instead; for floats it works analogously.
etc. And if the lines really are plain Python literals, as the output format of load() suggests, there is a safer drop-in; see the sketch below.
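A minimal sketch of that safer drop-in, ast.literal_eval(), which accepts only Python literals and refuses arbitrary code (the example line imitates what load() writes and is hypothetical):

import ast

line = "{12: ['id1', ['448', '325', '63', '124']]}"  # hypothetical line in the OP's format
d = ast.literal_eval(line)  # parses only Python literals, unlike eval()
print(d[12])  # -> ['id1', ['448', '325', '63', '124']]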
