How to run multiple files using Python

I am trying to process multiple text files from a folder on my desktop. I have to search for the word olympic, and if it finds olympic in those 50 text files, it should save the counts in output.txt, like
textfile1 = 2
textfile2 = 1, and so on up to textfile50.
import glob
import re

write_file = open("output.txt")
flist = glob.glob("./*.py") # adjust glob pattern as desired
print flist
print " lines : path to file"
for fpath in flist:
    with open(fpath) as f: #open files
        lines = f.readlines()
        if (lines == 'olympic'):
            write_file.write("%s" % lines) #write the results
        print "%6d : %s" % (len(lines), fpath)
    #with open securely opens and closes the file
write_file.close() # close file
This is what I am trying to do, but yes, I know it's full of errors :)
I am trying to process multiple files, but not manually: I want it to automatically run over every file in the directory/folder and save the output in one text file. I have 50 text files and all of them contain the word olympic, some once, some 2 or 3 times, etc. I want to count the occurrences in each text file and then save the counts in one text file, like textfile1 = 2, textfile2 = 3, etc. in output.txt.

Try this
import os
import re

filelist = [file for file in os.listdir('.') if '.py' in file]
for file in filelist:
    f = open(file, "r")
    lines = f.readlines()
    sum = 0
    sum1 = 0
    for line in lines:
        if "olympics" in line:
            sum = sum + len(re.findall(r'\Wolympics\W', line)) # count matches on this line
            print sum
        elif "Olympics" in line:
            sum1 = sum1 + 1
            print sum1
    print "%6d : %s" % (len(lines), file)
    f.close()
Not really sure what you're attempting to do, but I gave it a shot...
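For what the question actually asks (one total per file, written as "filename = count" lines in output.txt), a minimal sketch, assuming the text files sit next to the script and case-insensitive whole-word matching is wanted:
import glob
import re

files = glob.glob("*.txt")  # collect the inputs before creating output.txt
with open("output.txt", "w") as out:
    for fpath in files:
        with open(fpath) as f:
            count = len(re.findall(r"\bolympic\b", f.read(), re.IGNORECASE))
        out.write("%s = %d\n" % (fpath, count))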

Something like this:
fetch.py
#! /usr/bin/env python
import os
import sys

def get_counts(filepath, niddle):
    with open(filepath, 'rb') as f:
        return f.read().count(niddle)

if __name__ == '__main__':
    folder = sys.argv[1]
    niddle = sys.argv[2]
    assert os.path.isdir(folder), "Not a folder"
    output = open('results.txt', 'w')
    for dirpath, dirnames, filenames in os.walk(folder):
        for filename in filenames:
            if not filename.endswith('.html'): # or whatever
                continue
            filepath = os.path.abspath('%s/%s' % (dirpath, filename))
            result = '%s = %d\n' % (filepath, get_counts(filepath, niddle))
            output.write(result)
    output.close()
Use as:
$ python fetch.py /path/to/search olympic
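One caveat: on Python 3, f.read() with mode 'rb' returns bytes, so counting a str needle raises a TypeError. A hedged variant of get_counts that encodes the needle first:
def get_counts(filepath, niddle):
    # encode so bytes.count() receives a bytes argument on Python 3
    with open(filepath, 'rb') as f:
        return f.read().count(niddle.encode('utf-8'))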

Related

Processing each file in directory, output with different extension

I have written a Python program that reads a test input file and writes an output file:
inputFile = open ('test.dat', 'r')
outputFile = open('test.log', 'w')
outputFile.write(inputFile.read())
inputFile.close()
outputFile.close()
I'd like to read all the .dat files in a folder and output corresponding .log files, keeping the file name prefix the same. Could somebody help me?
I've figured out that I could list all the dat files with the following code. But don't know what to do from then.
import os

for file in os.listdir("."):
    if file.endswith(".htm"):
        print(os.path.join("xxx", file))
Also: is there any way to know the count of .dat files in a directory? This way, while processing each file, I could display the progress status, like: "Processing File 1 of 999 data files", etc.
Thanks a bunch
Joanna
You can use glob to list only the files you want and shutil.copyfile to copy them to the new file name. Use the size of the list you get from glob to print progress along the way.
from glob import glob
import shutil

dat_files = list(glob("*.dat"))
dat_file_len = len(dat_files)
for i, dat_file in enumerate(dat_files, 1):
    print(f"copying {i} of {dat_file_len}")
    shutil.copyfile(dat_file, dat_file[:-4] + ".log")
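On Python 3.4+, pathlib can make the extension swap less fragile than slicing off the last four characters; a sketch along the same lines:
from pathlib import Path
import shutil

dat_files = sorted(Path(".").glob("*.dat"))  # sorted for stable progress output
total = len(dat_files)
for i, dat_file in enumerate(dat_files, 1):
    print(f"copying {i} of {total}")
    # with_suffix swaps .dat for .log without string slicing
    shutil.copyfile(dat_file, dat_file.with_suffix(".log"))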
First you would have to get a list of all the .dat files contained in the directory; the code would look like this:
import glob
import os

dat_files = glob.glob('*.dat')
for i, dat_file in enumerate(dat_files):
    print("Writing %d file..." % (i+1))
    inputFile = open(dat_file, 'r')
    outputFile = open(dat_file[:-3] + "log", 'w') # removing "dat" from the end and inserting the new extension "log"
    outputFile.write(inputFile.read())
    inputFile.close()
    outputFile.close()
You can use the program below:
import glob

files_list = glob.glob('*.dat')
count_dat_file = len(files_list)
print("Count of .dat files is: {}\n".format(count_dat_file))
count_var = 1
for item in files_list:
    print("Processing file {} of {}".format(count_var, count_dat_file))
    print("Old file name is: {}".format(item))
    file_name_list = item.split('.')
    file_name_list[-1] = 'log'
    new_file_name = '.'.join(file_name_list)
    print("New file name is: {}".format(new_file_name))
    with open(item, 'r') as input_file:
        with open(new_file_name, 'w') as output_file:
            output_file.write(input_file.read())
    print("Data written to new file: {}".format(new_file_name))
    count_var += 1
    print("\n")

How to unzip all folders/files that end in .zip and extract “file.txt” file from each zipped folder

My code currently unzips one zip folder, finds the file called file.txt, and extracts it. Now I need to unzip multiple folders that have the extension .zip. The problem is that I have to find a file called file.txt in each of those .zip folders and extract only that file. Each extracted file.txt should also be stored in a separate folder named after the zip it came from. Thank you in advance for your time.
import re
import os
from zipfile import ZipFile

def pain():
    print("\t\t\tinput_files.zip has been unzipped")
    with ZipFile('input_files.zip', 'r') as zipObj:
        zipObj.extractall()
        listOfFileNames = zipObj.namelist()
        for fileName in listOfFileNames:
            if fileName.endswith('.txt'):
                zipObj.extract(fileName, 'storage')

    outfile = "output2.txt" #this will be the filename that the code will write to
    baconFile = open(outfile, "wt")
    file_name1 = "file.txt"
    print('Filename\tLine\tnumber of numbers\tstring separated by a comma\twhite space found\ttab found\tcarriage return found\n') #prints the header row in the Python shell
    baconFile.write('Filename\tLine\tnumber of numbers\tstring separated by a comma\twhite space found\ttab found\tcarriage return found\n') #writes the header row to the output file
    #for filename in os.listdir(os.getcwd() + "/input_files"):
    for filename in os.listdir(r'C:\Users\M29858\Desktop\TestPy\Version10\input_files'):
        with open("input_files/" + filename, 'r') as f:
            if file_name1 in filename:
                output_contents(filename, f, baconFile)
    baconFile.close() #close the output file

def output_contents(filename, f, baconFile): #f is the already-opened file to summarize line by line
    index = 0
    for line in f:
        #create a list of all of the comma-separated values in our line
        content = line.split(',') #used to count the fields before and after each comma
        whitespace_found = False
        tab_found = False
        false_string = "False (end of file)"
        carriage_found = false_string
        sigfigs = ""
        index += 1 #line counter
        if " " in line: #checking for whitespace
            whitespace_found = True
        if "\t" in line: #checking for tabs
            tab_found = True
        if '\n' in line: #checking for a trailing newline at the end of the line
            carriage_found = True
        sigfigs = (','.join(str(len(g)) for g in re.findall(r'\d+\.?(\d+)?', line))) #counts the digits after the decimal point
        print(filename + "\t{0:<4}\t{1:<17}\t{2:<27}\t{3:17}\t{4:9}\t{5:21}"
              .format(index, len(content), sigfigs, str(whitespace_found), str(tab_found), str(carriage_found))) #the .format() fields define the column layout
        baconFile.write('\n')
        baconFile.write(filename + "\t{0:<4}\t{1:<17}\t{2:<27}\t{3:17}\t{4:9}\t{5:21}"
                        .format(index, len(content), sigfigs, str(whitespace_found), str(tab_found), str(carriage_found)))

if __name__ == '__main__':
    pain()
#THIS WORKS
import glob
import os
from zipfile import ZipFile

def main():
    for fname in glob.glob("*.zip"): # get all the zip files
        with ZipFile(fname) as archive:
            # if there's no file.txt, ignore and go on to the next zip file
            if 'file.txt' not in archive.namelist(): continue
            # make a new directory named after the zip file
            dirname = fname.rsplit('.', 1)[0]
            os.mkdir(dirname)
            # extract file.txt into the directory you just created
            archive.extract('file.txt', path=dirname)

if __name__ == '__main__':
    main()
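One caveat with the above: os.mkdir raises FileExistsError if the script is run a second time over the same folder. Swapping in os.makedirs with exist_ok=True (Python 3.2+) makes re-runs safe; a sketch of just that step, with a hypothetical zip name:
import os

fname = "archive.zip"  # hypothetical zip file name
dirname = fname.rsplit('.', 1)[0]
os.makedirs(dirname, exist_ok=True)  # no error if the folder already exists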

Concatenate multiple files' data into one file and also rename the file?

Using Python, how can I combine all the text files in a specified directory into one text file, naming the output file after the input files?
For example: Filea.txt and Fileb_2.txt are in the root directory, and the generated output file is Filea_Fileb_2.txt
Filea.txt
123123
21321
Fileb_2.txt
2344
23432
Filea_Fileb_2.txt
123123
21321
2344
23432
my script:
import glob

PWD1 = '/home/jenkins/workspace'
files = glob.glob(PWD1 + '/' + '*.txt')
for f in files:
    with open(f, 'r') as file:
        for line in file:
            outputfile = open('outputfile.txt', 'a')
            outputfile.write(line)
outputfile.close()
Here's another way to combine text files.
#! python3
from pathlib import Path
import glob

folder_File1 = r"C:\Users\Public\Documents\Python\CombineFIles"
txt_only = r"\*.txt"
files_File1 = glob.glob(f'{folder_File1}{txt_only}')
new_txt = f'{folder_File1}\\newtxt.txt'
newFile = []
for indx, file in enumerate(files_File1):
    if file == new_txt:
        pass
    else:
        contents = Path(file).read_text()
        newFile.append(contents)
file = open(new_txt, 'w')
file.write("\n".join(newFile))
file.close()
This is a working solution that stores both the file names and the file contents in lists, joins the file names to create the "combined" output filename, and then writes the contents of all the files to it. Because lists append in the order the data is read, this is sufficient (my example filenames are filea.txt and fileb.txt, but it will work for the filenames you've used):
import os
import sys

path = sys.argv[1]
files = []
contents = []
for f in os.listdir(path):
    if f.endswith('.txt'): # in case there are other file types in there
        files.append(str(f.replace('.txt', ''))) # chop off .txt so we can join later
        with open(os.path.join(path, f)) as cat: # open relative to the given path
            for line in cat:
                contents.append(line) # put file contents in list
outfile_name = '_'.join(x for x in files) + '.txt' # create your output filename
outfile = open(outfile_name, 'w')
for line in contents:
    outfile.write(line)
outfile.close()
To run this on a specific directory, just pass it on the command line:
$python3.6 catter.py /path/to/my_text_files/
output filename:
filea_fileb.txt
contents:
123123
21321
2344
23432
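A more compact take on the same idea, as a sketch assuming the .txt files sit in the directory passed on the command line and none of them is a previous output file:
import sys
from pathlib import Path

path = Path(sys.argv[1])
txt_files = sorted(path.glob("*.txt"))  # sorted for a deterministic output name
out_name = "_".join(p.stem for p in txt_files) + ".txt"  # e.g. Filea_Fileb_2.txt
with open(path / out_name, "w") as out:
    for p in txt_files:
        out.write(p.read_text())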

How to save data from python into a csv file

I've got a program that at the end prints a "match". I wanted to save the data in this "match" to a csv file. How can I do that? I've written some code to save this variable, but it doesn't write anything.
Here's my code:
import shlex
import subprocess
import os
import platform
from bs4 import BeautifulSoup
import re
import csv
import pickle

def rename_files():
    file_list = os.listdir(r"C:\\PROJECT\\pdfs")
    print(file_list)
    saved_path = os.getcwd()
    print('Current working directory is ' + saved_path)
    os.chdir(r'C:\\PROJECT\\pdfs')
    for file_name in file_list:
        os.rename(file_name, file_name.translate(None, " "))
    os.chdir(saved_path)
rename_files()

def run(command):
    if platform.system() != 'Windows':
        args = shlex.split(command)
    else:
        args = command
    s = subprocess.Popen(args,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    output, errors = s.communicate()
    return s.returncode == 0, output, errors

# Change this to your PDF file base directory
base_directory = 'C:\\PROJECT\\pdfs'
if not os.path.isdir(base_directory):
    print "%s is not a directory" % base_directory
    exit(1)

# Change this to your pdf2htmlEX executable location
bin_path = 'C:\\Python27\\pdfminer-20140328\\tools\\pdf2txt.py'
if not os.path.isfile(bin_path):
    print "Could not find %s" % bin_path
    exit(1)

for dir_path, dir_name_list, file_name_list in os.walk(base_directory):
    for file_name in file_name_list:
        # If this is not a PDF file
        if not file_name.endswith('.pdf'):
            # Skip it
            continue
        file_path = os.path.join(dir_path, file_name)
        # Convert your PDF to HTML here
        args = (bin_path, file_name, file_path)
        success, output, errors = run("python %s -o %s.html %s" % args)
        if not success:
            print "Could not convert %s to HTML" % file_path
            print "%s" % errors

htmls_path = 'C:\\PROJECT'
for dir_path, dir_name_list, file_name_list in os.walk(htmls_path):
    for file_name in file_name_list:
        if not file_name.endswith('.html'):
            continue
        with open(file_name) as markup:
            soup = BeautifulSoup(markup.read())
            text = soup.get_text()
            match = re.findall("PA/(\S*)\s*(\S*)", text)
            print(match)

with open('score.csv', 'w') as f:
    writer = csv.writer(f)
    writer.writerows('%s' % match)
The part where I tried to save it into a csv file is the last 3 lines of code.
Here's a print of the "match" format: https://gyazo.com/930f9dad12109bc50825c91b51fb31f3
The way your code is structured, you iterate over the matches in your for loop; then, when the loop is finished, you save only the last match to your CSV. You probably want to write each match to the CSV inside the for loop instead.
Try replacing the last lines of your code (starting at the last for loop) with:
with open('score.csv', 'wt') as f:
    writer = csv.writer(f)
    for dir_path, dir_name_list, file_name_list in os.walk(htmls_path):
        for file_name in file_name_list:
            if not file_name.endswith('.html'):
                continue
            with open(file_name) as markup:
                soup = BeautifulSoup(markup.read())
                text = soup.get_text()
                match = re.findall("PA/(\S*)\s*(\S*)", text)
                print(match)
                writer.writerow(match)
Assuming you already have your "match", you can use the csv module in Python. The writer should get the job done.
It would be more helpful if you could elaborate on the format of your data.
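For reference, a minimal csv.writer sketch; the header and rows here are made up for illustration:
import csv

rows = [("PA/123", "foo"), ("PA/456", "bar")]  # hypothetical matches
with open("score.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["id", "value"])  # hypothetical header
    writer.writerows(rows)  # one CSV row per tuple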

Python - extract and modify part of a specific line of text with a function for all files in folder

I'm looking to extract and modify a specific line of text in many files within a folder but I am having some trouble.
For instance, the first file might read:
To: Bob
From: Bill
<Message> The eagle flies at midnight. <End Message>
The second message is different, but same format, and so on. I'd like to extract the third line, pass 'The eagle flies at midnight.' through a function (like base64), and then put it back on the line between 'Message' and 'End Message'. Such that the final output would read:
To: Bob
From: Bill
<Message> VGhlIGVhZ2xlIGZsaWVzIGF0IG1pZG5pZ2h0Lg== <End Message>
This is what I am trying (and adjusting) so far.
import base64
import os
import io

#ask user where his stuff is / is going
directory = raw_input("INPUT Folder:")
output = raw_input("OUTPUT Folder:")

#get that stuff
myfilepath = os.path.join(directory, '*.txt')
with open('*.txt', 'r') as file:
    data = file.readlines()

#Go to line 3 and take out the non-encoded text.
data[3] = X
X.strip("<Message>")
X.strip("<End Message>")
coded_string = X

#Encode line 3
base64.b64encode(coded_string)
data[3] = '<Message> %s <End Message>' % (coded_string)

# and write everything back
with open('*.txt', 'w') as file:
    file.writelines(data)
I'm sure there are numerous problems, particularly with how I am opening and writing back. Bonus points: 99% of the messages in this folder are in this exact format, but there are 1% junk messages (they don't need to be encoded, and line 3 for them is something different). I'm not too worried about them, but if they could pass through unharmed, that'd be nifty. Maybe line 3 should be line 2, if the count starts at 0...
Edit: Trying
import re, base64
import os

folder = 'C:/Users/xxx/Desktop/input'
matcher = re.compile("<Message>(?P<text>[^<]*)<End Message>")
for filename in os.listdir(folder):
    infilename = os.path.join(folder, filename)
    if not os.path.isfile(infilename): continue
    base, extension = os.path.splitext(filename)
    filein = open(infilename, 'r')
    fileout = open(os.path.join(folder, '{}_edit.{}'.format(base, extension)), 'w')
    for line in filein:
        match = matcher.search(line)
        if match:
            fileout.write("<message> " + base64.b64encode(match.group('text').strip()) + " <End message>\n")
        else:
            fileout.write(line)
filein.close()
fileout.close()
Ultimately this gives me a bunch of blank files, except for the last one, which is translated properly (the close() calls sit outside the for loop, so only the last pair of files gets flushed and closed).
You can use a regular expression to make it easier:
import re, base64

filein = open("examplein.txt", 'r')
fileout = open("exampleout.txt", 'w')
matcher = re.compile("<Message>(?P<text>[^<]*)<End Message>")
for line in filein:
    match = matcher.search(line)
    if match:
        fileout.write("<message> " + base64.b64encode(match.group('text').strip()) + " <End message>\n")
    else:
        fileout.write(line)
filein.close()
fileout.close()
This code works on just one file; you should adapt it to work with all the files in your directory:
import re, base64
import os

folder = '/home/user/Public'
matcher = re.compile("<Message>(?P<text>[^<]*)<End Message>")
for filename in os.listdir(folder):
    infilename = os.path.join(folder, filename)
    if not os.path.isfile(infilename): continue
    base, extension = os.path.splitext(filename)
    filein = open(infilename, 'r')
    fileout = open(os.path.join(folder, '{}_edit.{}'.format(base, extension)), 'w')
    for line in filein:
        match = matcher.search(line)
        if match:
            fileout.write("<message> " + base64.b64encode(match.group('text').strip()) + " <End message>\n")
        else:
            fileout.write(line)
    filein.close()
    fileout.close()
This code works on my PC.
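One note if this is run under Python 3: base64.b64encode expects bytes, not str, so the encoding step needs an encode/decode round trip; a sketch with a hypothetical message string:
import base64

text = "The eagle flies at midnight."  # hypothetical captured group
encoded = base64.b64encode(text.encode("utf-8")).decode("ascii")
print("<Message> " + encoded + " <End Message>")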
