Python - Passing a number as a parameter to a filename

For example, I have two .txt files. The first file has 78 lines, the second has 30 lines.
Is there an easy way to pass a sequential number into the resulting filenames?
Currently the result I get is:
first_file_20.txt
first_file_40.txt
first_file_60.txt
first_file_80.txt
second_file_20.txt
second_file_40.txt
but I would like the result to be:
first_file_1.txt
first_file_2.txt
first_file_3.txt
first_file_4.txt
second_file_1.txt
second_file_2.txt
code:
import re
import os

lines_per_file = 20
smallfile = None
root_path = os.getcwd()
if os.path.exists(root_path):
    files = []
    for name in os.listdir(root_path):
        if os.path.isfile(os.path.join(root_path, name)):
            files.append(os.path.join(root_path, name))
    print(files)  # list all files in directory
    for ii in files:
        if ii.endswith(".txt"):  # only txt files
            with open(ii, 'r') as bigfile:
                name1 = str(os.path.basename(ii).split(".")[0])
                name2 = str(name1 + '_{}.txt')
                print('name', name2)
                for lineno, line in enumerate(bigfile):
                    w = 1
                    if lineno % lines_per_file == 0:
                        if smallfile:
                            smallfile.close()
                        small_filename = name2.format(lineno + lines_per_file)
                        smallfile = open(small_filename, "w")
                    smallfile.write(line)
                if smallfile:
                    smallfile.close()
Can anyone help me?

Don't add lineno and lines_per_file; divide them:
small_filename = name2.format(lineno//lines_per_file + 1)
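For clarity, here is that change in the context of the original loop, as a sketch that reuses the same variable names as the question's code:

# Sketch: same splitting loop as above, with the chunk counter fixed.
for lineno, line in enumerate(bigfile):
    if lineno % lines_per_file == 0:
        if smallfile:
            smallfile.close()
        # Integer-divide the line number by the chunk size and add 1,
        # so the chunks are numbered 1, 2, 3, ... instead of 20, 40, 60, ...
        small_filename = name2.format(lineno // lines_per_file + 1)
        smallfile = open(small_filename, "w")
    smallfile.write(line)
if smallfile:
    smallfile.close()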

Related

Unexpected end of data when zipping zip files in Python

Good day.
I wrote a little Python program to help me easily create .cbc files for Calibre. A .cbc is just a renamed .zip file containing a text file called comics.txt for TOC purposes, and each chapter is another zip file inside it.
The issue is that the last zip file created always has the error "Unexpected end of data". The file itself is not corrupt; if I unzip and rezip it, it works perfectly. Playing around, it seems the problem is that Python doesn't close the last zip file after writing it: I can't delete the last zip while the program is still running because it is still open in Python. Needless to say, Calibre doesn't like the file and fails to convert it unless I manually rezip the affected chapters.
The code is as follows: it checks the folders for non-image files, zips the folders, zips the zips while creating the text file, and "changes" the extension.
import re, glob, os, zipfile, shutil, pathlib, gzip, itertools

Folders = glob.glob("*/")
items = len(Folders)
cn_list = []
cn_list_filtered = []
dirs_filtered = []
ch_id = ["c", "Ch. "]
subdir_im = []
total = 0
Dirs = next(os.walk('.'))[1]
for i in range(0, len(Dirs)):
    for items in os.listdir("./" + Dirs[i]):
        if items.__contains__('.png') or items.__contains__('.jpg'):
            total += 1
        else:
            print(items + " not an accepted format.")
    subdir_im.append(total)
    total = 0
for fname in Folders:
    if re.search(ch_id[0] + r'\d+' + r'[\S]' + r'\d+', fname):
        cn = re.findall(ch_id[0] + r"(\d+[\S]\d+)", fname)[0]
        cn_list.append(cn)
    elif re.search(ch_id[0] + r'\d+', fname):
        cn = re.findall(ch_id[0] + r"(\d+)", fname)[0]
        cn_list.append(cn)
    elif re.search(ch_id[1] + r'\d+' + r'[\S]' + r'\d+', fname):
        cn = re.findall(ch_id[1] + r"(\d+[\S]\d+)", fname)[0]
        cn_list.append(cn)
    elif re.search(ch_id[1] + r'\d+', fname):
        cn = re.findall(ch_id[1] + r"(\d+)", fname)[0]
        cn_list.append(cn)
    else:
        print('Warning: File found without proper filename format.')
cn_list_filtered = set(cn_list)
cn_list_filtered = sorted(cn_list_filtered)
cwd = os.getcwd()
Dirs = Folders
subdir_zi = []
total = 0
for i in range(0, len(cn_list_filtered)):
    for folders in Dirs:
        if folders.__contains__(ch_id[0] + cn_list_filtered[i] + " ") \
                or folders.__contains__(ch_id[1] + cn_list_filtered[i] + " "):
            print('Zipping folder ', folders)
            namezip = "Chapter " + cn_list_filtered[i] + ".zip"
            current_zip = zipfile.ZipFile(namezip, "a")
            for items in os.listdir(folders):
                if items.__contains__('.png') or items.__contains__('.jpg'):
                    current_zip.write(folders + "/" + items, items)
                    total += 1
    subdir_zi.append(total)
    total = 0
print('Folder contents in order:', subdir_im, ' Total:', sum(subdir_im))
print("Number of items per zip: ", subdir_zi, ' Total:', sum(subdir_zi))
if subdir_im == subdir_zi:
    print("All items in folders have been successfully zipped")
else:
    print("Warning: File count in folders and zips do not match. Please check the affected chapters")
zips = glob.glob("*.zip")
namezip2 = os.path.basename(os.getcwd()) + ".zip"
zipfinal = zipfile.ZipFile(namezip2, "a")
for i in range(0, len(zips), 1):
    zipfinal.write(zips[i], zips[i])
Data = []
for i in range(0, len(cn_list_filtered), 1):
    Datai = ("Chapter " + cn_list_filtered[i] + ".zip" + ":Chapter " + cn_list_filtered[i] + "\r\n")
    Data.append(Datai)
Dataok = ''.join(Data)
with zipfile.ZipFile(namezip2, 'a') as myzip:
    myzip.writestr("comics.txt", Dataok)
zipfinal.close()
os.rename(namezip2, namezip2 + ".cbc")
os.system("pause")
I am by no means a programmer; this is just Frankenstein-monster code I eventually managed to put together by checking threads, but this last issue has me stumped.
Some solutions I tried are:
for i in range(0, len(zips), 1):
    zipfinal.write(zips[i], zips[i])
    zips[i].close()
Fails with:
zips[i].close()
AttributeError: 'str' object has no attribute 'close'
and:
for i in range(0, len(zips), 1):
    zipfinal.write(zips[i], zips[i])
    zips[len(zips)].close()
Fails with:
zips[len(zips)].close()
IndexError: list index out of range
Thanks for the help.
This solved my issue:
import io
import zipfile

def generate_zip(file_list, file_name=None):
    zip_buffer = io.BytesIO()
    zf = zipfile.ZipFile(zip_buffer, mode="w", compression=zipfile.ZIP_DEFLATED)
    for file in file_list:
        print(f"Filename: {file[0]}\nData: {file[1]}")
        zf.writestr(file[0], file[1])
    # Closing the archive before writing the buffer out is the crucial step;
    # without it the last entries are never flushed and the zip ends abruptly.
    zf.close()
    with open(file_name, 'wb') as f:
        f.write(zip_buffer.getvalue())
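The same principle applies to the chapter zips in the script above: each ZipFile opened inside the loop should be closed before the final archive is built, for example by opening it with a with block. A sketch of that part of the loop, reusing the question's variable names:

# Sketch: open each chapter zip with a context manager so it is
# guaranteed to be closed, even for the last folder processed.
namezip = "Chapter " + cn_list_filtered[i] + ".zip"
with zipfile.ZipFile(namezip, "a") as current_zip:
    for items in os.listdir(folders):
        if items.__contains__('.png') or items.__contains__('.jpg'):
            current_zip.write(folders + "/" + items, items)
            total += 1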

Export Excel Module via Python

I'm trying to replicate, in Python, the export of a code module from an Excel workbook.
The following works in VBA:
Public Sub ExportModules()
    Dim wb As Workbook
    Set wb = ThisWorkbook
    Dim D As String
    Dim N
    D = ThisWorkbook.Path
    For Each VBComp In wb.VBProject.VBComponents
        If (VBComp.Type = 1) Then
            N = D + "\" + VBComp.Name + ".txt"
            VBComp.Export N
        End If
    Next
End Sub
And I have the following in Python:
import os
import sys
import glob
from win32com.client import Dispatch

scripts_dir = 'folder address'
com_instance = Dispatch("Excel.Application")
com_instance.Visible = False
com_instance.DisplayAlerts = False
for script_file in glob.glob(os.path.join(scripts_dir, "*.xlsm")):
    print "Processing: %s" % script_file
    (file_path, file_name) = os.path.split(script_file)
    objworkbook = com_instance.Workbooks.Open(script_file)
    for xlmodule in objworkbook.VBProject.VBComponents:
        xlmodule.Export('export file name')
My question is, what do I have to do in Python to replicate the Export of the file as per the VBA code?
Use oletools. xltrail provides a good way to extract .bas files from .xlsm and other Excel files:
import os
import shutil
from oletools.olevba3 import VBA_Parser

EXCEL_FILE_EXTENSIONS = ('xlsb', 'xls', 'xlsm', 'xla', 'xlt', 'xlam',)

def parse(workbook_path):
    vba_path = workbook_path + '.vba'
    vba_parser = VBA_Parser(workbook_path)
    vba_modules = vba_parser.extract_all_macros() if vba_parser.detect_vba_macros() else []
    for _, _, _, content in vba_modules:
        decoded_content = content.decode('latin-1')
        lines = []
        if '\r\n' in decoded_content:
            lines = decoded_content.split('\r\n')
        else:
            lines = decoded_content.split('\n')
        if lines:
            name = lines[0].replace('Attribute VB_Name = ', '').strip('"')
            content = [line for line in lines[1:] if not (
                line.startswith('Attribute') and 'VB_' in line)]
            if content and content[-1] == '':
                content.pop(len(content) - 1)
            lines_of_code = len(content)
            non_empty_lines_of_code = len([c for c in content if c])
            if non_empty_lines_of_code > 0:
                if not os.path.exists(os.path.join(vba_path)):
                    os.makedirs(vba_path)
                with open(os.path.join(vba_path, name + '.bas'), 'w') as f:
                    f.write('\n'.join(content))

if __name__ == '__main__':
    for root, dirs, files in os.walk('.'):
        for f in dirs:
            if f.endswith('.vba'):
                shutil.rmtree(os.path.join(root, f))
        for f in files:
            if f.endswith(EXCEL_FILE_EXTENSIONS):
                parse(os.path.join(root, f))
I have tried it and it works great.
Ref: https://www.xltrail.com/blog/auto-export-vba-commit-hook
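To stay with the win32com approach from the question instead, the missing piece is just building the export path from each component's name, the same way the VBA does. This is only a sketch, reusing com_instance and script_file from the question's loop; Type == 1 corresponds to a standard code module, as in the VBA If (VBComp.Type = 1) check:

# Sketch: mirror the VBA ExportModules loop through COM.
file_path, file_name = os.path.split(script_file)
objworkbook = com_instance.Workbooks.Open(script_file)
for xlmodule in objworkbook.VBProject.VBComponents:
    if xlmodule.Type == 1:  # standard code module, as in the VBA check
        xlmodule.Export(os.path.join(file_path, xlmodule.Name + ".txt"))
objworkbook.Close(False)  # close without saving

Note that Excel's "Trust access to the VBA project object model" option must be enabled, otherwise VBProject is not accessible through COM.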

read multiple files automatically no manual file naming

I have a directory containing 50 files, and I want to read them one by one and compare each with another file that is fixed. I am using glob.glob, but it didn't work.
Here is how I am reading all the files. If, instead of path = '*.rbd', I give a file name like path = 'run-01.rbd', it works.
path = '*.rbd'
path = folder + path
files=sorted(glob.glob(path))
Complete code:
import glob
from itertools import islice
import linecache

num_lines_nonbram = 1891427
bits_perline = 32
total_bit_flips = 0
num_bit_diff_flip_zero = 0
num_bit_diff_flip_ones = 0
folder = "files/"
path = '*.rbd'
path = folder + path
files = sorted(glob.glob(path))
original = open('files/mull-original-readback.rbd', 'r')
#source1 = open(file1, "r")
for filename in files:
    del_lines = 101
    with open(filename, 'r') as f:
        i = 1
        while i <= del_lines:
            line1 = f.readline()
            lineoriginal = original.readline()
            i += 1
        i = 0
        num_bit_diff_flip_zero = 0
        num_bit_diff_flip_ones = 0
        num_lines_diff = 0
        i = 0
        j = 0
        k = 0
        a_write2 = ""
        while i < (num_lines_nonbram - del_lines):
            line1 = f.readline()
            lineoriginal = original.readline()
            while k < bits_perline:
                if lineoriginal[k] == line1[k]:
                    a_write2 += " "
                else:
                    if lineoriginal[k] == "0":
                        #if ((line1[k]=="0" and line1[k]=="1")):
                        num_bit_diff_flip_zero += 1
                    if lineoriginal[k] == "1":
                        #if ((line1[k]=="0" and line1[k]=="1")):
                        num_bit_diff_flip_ones += 1
                    #if ((line1[k]==1 and line1[k]==0)):
                        #a_write_file2 = str(i+1) + " " + str(31-k) + "\n" + a_write_file2
                        #a_write2 += "^"
                        #num_bit_diff_flip_one += 1
                    # else:
                    #     a_write2 += " "
                k += 1
            total_bit_flips = num_bit_diff_flip_zero + num_bit_diff_flip_ones
            i += 1
            k = 0
        i = 0
    print files
    print "Number of bits flip zero= %d" % num_bit_diff_flip_zero + "\n" + "Number of bits flip one= %d" % num_bit_diff_flip_ones + "\n" "Total bit flips = %d " % total_bit_flips
    f.close()
original.close()
You could use the os module to first list everything in a directory (both files and folders), then use a list comprehension to filter out only the files. You could then use a second list comprehension to keep only the files with a specific extension. There is probably a more efficient way of doing it, but this works:
import os

def main():
    path = './'  # The path to the current directory
    # Go through all items in the directory and filter out files
    files = [file for file in os.listdir(path) if
             os.path.isfile(os.path.join(path, file))]
    # Go through all files and keep only those with .txt (for example)
    specificExtensionFiles = [file for file in files if ".txt" in file]
    # Now specificExtensionFiles is a list of the .txt files in the current
    # directory, which you can use in a for loop
    print(specificExtensionFiles)

if __name__ == '__main__':
    main()
For further reference:
How do I list all files of a directory?
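Since the question already uses glob, the same filtering can also be done in one step; a short sketch using the folder and extension from the question:

import glob
import os

folder = "files/"
# glob filters by extension directly; sorted() keeps run-01, run-02, ... in order
rbd_files = sorted(glob.glob(os.path.join(folder, "*.rbd")))
print(rbd_files)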
The problem is that you're not going back to the beginning of original whenever you start comparing with the next file in the for filename in files: loop. The simplest solution is to put:
original.seek(0)
at the beginning of that loop.
You could also read the whole file into a list just once before the loop, and use that instead of reading the file repeatedly.
And if you only want to process part of the files, you can read the file into a list, and then use a list slice to get the lines you want.
You also shouldn't be setting num_bit_diff_flip_zero and num_bit_diff_flip_one to 0 each time through the loop, since these are supposed to be the total across all files.
with open('files/mull-original-readback.rbd', 'r') as original:
    original_lines = list(original)[del_lines:num_lines_nonbram]

for filename in files:
    with open(filename, 'r') as f:
        lines = list(f)[del_lines:num_lines_nonbram]
    for lineoriginal, line1 in zip(original_lines, lines):
        for k in range(bits_perline):
            if lineoriginal[k] == line1[k]:
                a_write2 += " "
            elif lineoriginal[k] == "0":
                num_bit_diff_flip_zero += 1
            else:
                num_bit_diff_flip_ones += 1
total_bit_flips = num_bit_diff_flip_zero + num_bit_diff_flip_ones

How to write a daily .csv to a specific filepath

I'm running a program daily and want the .csv it generates to be written to a folder on my C drive. For some reason, I can create the folder and write one file, but no others are written. I'm not getting any errors; the other files just never appear in that folder. Here's the code. Thanks.
Code:
CSVdir = r"C:\Users\Maurice\Desktop\Python\New_Project\OptionsData\\OptionsData-{}.csv"
realCSVdir = os.path.realpath(CSVdir)
if not os.path.exists(CSVdir):
    os.makedirs(CSVdir)
    str1 = "\n".join(data)
    now = datetime.datetime.now()  # + datetime.timedelta(days=1)
    now_str = now.strftime("%Y-%m-%d")
    new_file_name = os.path.join(realCSVdir, 'OptionsData-{}.csv'.format(now_str))
    new_file = open(new_file_name, 'wb')
    for item in money_list:
        if len(item) != 0:
            for other_item in item:
                new_file.write(other_item + str1 + new_file)
    new_file.close()
    print("Eureka!")
CSVdir = r"C:\Users\Maurice\Desktop\Python\New_Project\OptionsData\\OptionsData-{}.csv"
should be
CSVdir = r"C:\Users\Maurice\Desktop\Python\New_Project\OptionsData"

if not os.path.exists(CSVdir):
    os.makedirs(CSVdir)
# The following lines should be out of the if statement.
str1 = "\n".join(data)
now = datetime.datetime.now()  # + datetime.timedelta(days=1)
now_str = now.strftime("%Y-%m-%d")
new_file_name = os.path.join(realCSVdir, 'OptionsData-{}.csv'.format(now_str))
new_file = open(new_file_name, 'wb')
for item in money_list:
    if len(item) != 0:
        for other_item in item:
            new_file.write(other_item + str1 + new_file)
new_file.close()
print("Eureka!")

Read all files from folder and edit

I am trying to read all FASTA files from a test folder and append the file name to every header in each file. The code works for the first file but doesn't proceed to the second file and returns an error. Could you help me find the bug in my code or edit it? Thanks.
import sys, glob, os, string

header = ''
check = 0
path = "./test/"
dirs = os.listdir(path)
for file in dirs:
    fp = open(file, "r")
    fpx = open('%s_output.txt' % file, 'w')
    for line in fp:
        if line.startswith('>'):
            line = line.rstrip()
            check = check + 1
            if check >= 1:
                header = line
                fpx.write(header + '_' + file + '\n')
        else:
            line = line.rstrip()
            fpx.write(line + '\n')
It would be good to provide the error message you are getting! I think this must fail with "File not found", because you try to open the file by name instead of by its path. Try fp = open(os.path.join(path, file), "r"):
import sys, glob, os, string

header = ''
check = 0
path = "./test/"
dirs = os.listdir(path)
for file in dirs:
    fp = open(os.path.join(path, file), "r")
    fpx = open('%s_output.txt' % file, 'w')
    for line in fp:
        if line.startswith('>'):
            line = line.rstrip()
            check = check + 1
            if check >= 1:
                header = line
                fpx.write(header + '_' + file + '\n')
        else:
            line = line.rstrip()
            fpx.write(line + '\n')
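A slightly tidier variant of the same loop, as a sketch (the *.fasta filter and the with blocks are additions, not part of the original answer):

import glob
import os

path = "./test/"
for in_path in glob.glob(os.path.join(path, "*.fasta")):  # only FASTA files
    name = os.path.basename(in_path)
    with open(in_path) as fp, open('%s_output.txt' % name, 'w') as fpx:
        for line in fp:
            line = line.rstrip()
            if line.startswith('>'):
                fpx.write(line + '_' + name + '\n')  # append the filename to the header
            else:
                fpx.write(line + '\n')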
