Finding the first largest file from multiple random files - python

def line_count(filename):
    for filename in os.walk(os.path.abspath('my directory filename')):
        lines = 0
        with open(filename) as file:
            lines = len([line for line in file.readlines() if line.strip() != ''])
        print(lines)

def find_big_files(files):
    file_sizes = [(line_count(file), file) for file in files]
    print(sorted(file_sizes, key=lambda file_size: file_size[0], reverse=True))

sorted_files = find_big_files(file)

This does not work.

Since you're looking for the LONGEST files, not the BIGGEST files, do this:

def get_length(file):
    len_ = 0
    with open(file, 'r') as f:
        for line in f:
            len_ += 1
    return len_

files = [file for file in however_you_build_your_list]
files = sorted(files, key=get_length, reverse=True)
# files[0] is now the longest
# files[-1] is now the shortest

Are you counting empty lines as lines?
If so, the following gives you the number of raw newlines in a file:

def line_count(filename):
    lines = 0
    with open(filename) as file:
        lines = len(file.readlines())
    return lines

If not, change the lines = ... line to:

lines = len([line for line in file.readlines() if line.strip() != ''])
So, the rest of the code would look like the following:

def find_big_files(files):
    largest = (0, None)
    second_largest = (0, None)
    for file in files:
        size = line_count(file)
        if size > largest[0]:
            second_largest = largest
            largest = (size, file)
        elif size > second_largest[0]:
            second_largest = (size, file)
    return largest, second_largest

Note that this is fairly expensive, because it has to open every file and iterate over every line, so it is O(total number of lines across all files). But if you really care about line counts, there is no good way around that, at least for generic .txt files or similar.
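If memory is a concern for very large files, one small improvement is to count lines lazily instead of loading the whole file with readlines(). A minimal sketch of line_count using the same non-empty-line rule:

def line_count(filename):
    # Count non-empty lines without loading the whole file into memory.
    with open(filename) as file:
        return sum(1 for line in file if line.strip() != '')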
If you want the whole list, from fewest lines to most lines:

def find_big_files(files):
    file_sizes = [(line_count(file), file) for file in files]
    return sorted(file_sizes, key=lambda file_size: file_size[0])
A list of (line_count, file_name) tuples will be returned, and list[-1] will be the largest, list[-2] will be the second largest, and so on.
EDIT:
OP asked me to post the whole code in one block that solves the problem, so here it is:

def line_count(filename):
    lines = 0
    with open(filename) as file:
        lines = len([line for line in file.readlines() if line.strip() != ''])
    return lines

def find_big_files(files):
    file_sizes = [(line_count(file), file) for file in files]
    return sorted(file_sizes, key=lambda file_size: file_size[0], reverse=True)

The return from result = find_big_files(files) will be [(count, filename), ...] from largest to smallest, so result[0] will be the largest, result[1] will be the second largest, etc. Ties will stay in the order they appeared in the input list of file paths.
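For example (with a hypothetical list of file names), calling it might look like this:

files = ['a.txt', 'b.txt', 'c.txt']    # hypothetical input list
result = find_big_files(files)         # [(count, filename), ...], largest first
largest_count, largest_file = result[0]
second_count, second_file = result[1]  # assumes the list has at least two files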


Overlapping points in an interval having conditions

I want to find positions that overlap with two coordinates (an interval), where both are on the same chromosome.
The file with the positions looks like this:
with open(file_path, 'r') as f:
    lines = [l for l in f if not l.startswith('#')]
print(lines)
['chr1\t36931696\t.\tT\t.\t100\tPASS\tDP=839\tGT:GQ:AD:DP:VF:NL:SB:NC\t0/.:100:830:839:0.0107:24:-100.0000:0.0071\n', 'chr2\t25457280\t.\tA\t.\t100\tPASS\tDP=1410\tGT:GQ:AD:DP:VF:NL:SB:NC\t0/0:19:1403:1410:0.0050:24:-100.0000:0.0014\n', '\n', '\n']
# I have limited the file to only two lines, but it normally has around 100k lines
And the file with the intervals looks like this
print(bedregions)
[('chr1', 36931694, 36931909, 'CSF3R.exon.17.line.1.chr1.36931697.36932509--tile--1.probe--coordinates(36931694-36931909)'), ('chr2', 25466989, 25467211, 'DNMT3A.CDS.17.line.57.merged--with.DNMT3A.CDS.16.li.probe--coordinates(25466989-25467211)')]
# I have limited this file to two tuples as well; it actually has around 500 tuples
This is what I have been trying:

def roi2(file_path, bedregions):
    with open(file_path, 'r') as f:
        lines = [l for l in f if not l.startswith('#')]
    chr2position = {}
    for position, line in enumerate(lines):
        # If there is an empty line this will give an empty list
        # and the following split will give an out-of-range error
        if (len(line)) == 1:
            break
        # Take the chr
        chr = line.strip().split()[0]
        if chr not in chr2position:
            chr2position[chr] = position
    filtered_lines = []
    for element in bedregions:
        ch, start, end, probe_name = element
        for lineindex in range(start + chr2position[chr], end + chr2position[chr]):
            filtered_lines.append(lines[lineindex])
    # This raises an error in the last line: IndexError: list index out of range
This should do what you want, considering the data structure you mentioned:

f = open(file_path, 'r')
lines = f.readlines()
chr2base2index = dict()
for index, line in enumerate(lines):
    if (len(line)) == 1:
        break
    if line[0] == '#':
        continue
    handle = line.strip().split()
    chrm, base = handle[0], int(handle[1])
    if chrm not in chr2base2index:
        chr2base2index[chrm] = dict()
    if base not in chr2base2index[chrm]:
        chr2base2index[chrm][base] = index

filtered_lines = []
for chrm, start, end, probe_name in bedregions:
    if chrm not in chr2base2index:
        print(f'Chromosome {chrm} not found')
        continue
    for base in range(start, end):
        index = chr2base2index[chrm].get(base, None)
        if index is not None:
            filtered_lines.append('\t'.join(lines[index].strip().split() + [probe_name]))

filtered_lines
['chr1\t36931696\t.\tT\t.\t100\tPASS\tDP=839\tGT:GQ:AD:DP:VF:NL:SB:NC\t0/.:100:830:839:0.0107:24:-100.0000:0.0071\tCSF3R.exon.17.line.1.chr1.36931697.36932509--tile--1.probe--coordinates(36931694-36931909)',
'chr1\t36931697\t.\tT\t.\t100\tPASS\tDP=832\tGT:GQ:AD:DP:VF:NL:SB:NC\t0/0:15:829:832:0.0036:24:-100.0000:0.0154\tCSF3R.exon.17.line.1.chr1.36931697.36932509--tile--1.probe--coordinates(36931694-36931909)',
'chr1\t36931698\t.\tT\t.\t100\tPASS\tDP=837\tGT:GQ:AD:DP:VF:NL:SB:NC\t0/0:36:836:837:0.0012:24:-100.0000:0.0095\tCSF3R.exon.17.line.1.chr1.36931697.36932509--tile--1.probe--coordinates(36931694-36931909)',
'chr1\t36931699\t.\tA\t.\t100\tPASS\tDP=836\tGT:GQ:AD:DP:VF:NL:SB:NC\t0/0:36:835:836:0.0012:24:-100.0000:0.0107\tCSF3R.exon.17.line.1.chr1.36931697.36932509--tile--1.probe--coordinates(36931694-36931909)',
'chr1\t36931700\t.\tC\t.\t100\tPASS\tDP=818\tGT:GQ:AD:DP:VF:NL:SB:NC\t0/0:14:814:818:0.0049:24:-100.0000:0.0320\tCSF3R.exon.17.line.1.chr1.36931697.36932509--tile--1.probe--coordinates(36931694-36931909)',
'chr1\t36931701\t.\tA\t.\t100\tPASS\tDP=841\tGT:GQ:AD:DP:VF:NL:SB:NC\t0/0:20:838:841:0.0036:24:-100.0000:0.0047\tCSF3R.exon.17.line.1.chr1.36931697.36932509--tile--1.probe--coordinates(36931694-36931909)',
'chr1\t36931702\t.\tA\t.\t100\tPASS\tDP=825\tGT:GQ:AD:DP:VF:NL:SB:NC\t0/0:19:822:825:0.0036:24:-100.0000:0.0237\tCSF3R.exon.17.line.1.chr1.36931697.36932509--tile--1.probe--coordinates(36931694-36931909)',
'chr1\t36931703\t.\tT\t.\t100\tPASS\tDP=833\tGT:GQ:AD:DP:VF:NL:SB:NC\t0/0:26:832:833:0.0012:24:-100.0000:0.0142\tCSF3R.exon.17.line.1.chr1.36931697.36932509--tile--1.probe--coordinates(36931694-36931909)',
'chr1\t36931704\t.\tA\t.\t100\tPASS\tDP=833\tGT:GQ:AD:DP:VF:NL:SB:NC\t0/0:11:829:833:0.0048:24:-100.0000:0.0142\tCSF3R.exon.17.line.1.chr1.36931697.36932509--tile--1.probe--coordinates(36931694-36931909)']
This works with as many chromosomes as are found in the interval file. If a chromosome appears in the interval file but not in the searched file, that would normally raise an error, so I added a check that handles it and also tells you when it happens.
Results are given in a list, one line per element, but this can be changed in the last line of the code.
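For convenience, the same logic can be wrapped in a function (a sketch only, reusing the names from the question; the approach is unchanged):

def roi2(file_path, bedregions):
    # Index each (chromosome, base) pair to its line number, then look up
    # every base of every interval.
    with open(file_path, 'r') as f:
        lines = f.readlines()
    chr2base2index = {}
    for index, line in enumerate(lines):
        if len(line) == 1:
            break
        if line.startswith('#'):
            continue
        fields = line.strip().split()
        chrm, base = fields[0], int(fields[1])
        chr2base2index.setdefault(chrm, {}).setdefault(base, index)
    filtered_lines = []
    for chrm, start, end, probe_name in bedregions:
        if chrm not in chr2base2index:
            print(f'Chromosome {chrm} not found')
            continue
        for base in range(start, end):
            index = chr2base2index[chrm].get(base)
            if index is not None:
                filtered_lines.append('\t'.join(lines[index].strip().split() + [probe_name]))
    return filtered_lines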

python efficient replacement of strings in nested array

I have a txt file with thousands of lines as strings.
Each line starts in the format '#integer', so for example '#100'.
I read the txt file sequentially (line #1, #2, #3, ...) and build a specific array that I want, where the array is a collection of the line numbers plus other line numbers connected to those lines:
The array is in the form of:
[ ['#355', '#354', '#357', '#356'], ['#10043', '#10047', '#10045'], ['#1221', '#1220', '#1223', '#1222', '#1224'], [...] ]
It can contain hundreds of numbers.
(this is because I have an array of numbers and further 'children' that are associated with them added to each sub-array.)
I read my txt file before the function below, meaning that first I read the txt file, extract the numbers, and then pass those as an array to the extended_strings function, which replaces each number with the actual line from the txt file for that number.
import re

def extended_strings(matrix, base_txt):
    string_matrix = matrix  # new matrix to contain our future strings
    for numset in string_matrix:
        for num in numset:
            for line in base_txt:
                results = re.findall(r'^#\d+', line)  # find the line # at the start of the string
                if len(results) > 0 and results[0] == num:  # we have a line # that matches our # in the numset
                    index = numset.index(num)  # find index of line # in the numset
                    numset[index] = line  # replace the line # with the actual string from the txt
    return string_matrix
I am trying to make this process shorter and more efficient. For example, with 150,000 strings in the txt file, the inner "for line in base_txt" loop ends up scanning the file millions of times.
Any suggestions?
I didn't do any benchmarking, but I'm confident that this will help.
That said, there is still room for plenty of further improvement.
text.txt:
#1 This is line #00001
#2 This is line #00002
#30 This is line #00030
#35 This is line #00035
#77 This is line #00077
#101 This is line #00101
#145 This is line #00145
#1010 This is line #01010
#8888 This is line #08888
#13331 This is line #13331
#65422 This is line #65422
code:

import re

# reo = re.compile(r'^(#\d+)\s+(.*)\n$')  # exclude line numbers in "string_matrix"
reo = re.compile(r'^((#\d+)\s+.*)\n$')    # include line numbers in "string_matrix"

def file_to_dict(file_name):
    file_dict = {}
    with open(file_name) as f:
        for line in f:
            mo = reo.fullmatch(line)
            # file_dict[mo.group(1)] = mo.group(2)  # exclude line numbers in "string_matrix"
            file_dict[mo.group(2)] = mo.group(1)    # include line numbers in "string_matrix"
    return file_dict

def extended_strings(matrix, file_dict):
    string_matrix = []
    for numset in matrix:
        new_numset = []
        for num in numset:
            new_numset.append(file_dict[num])
        string_matrix.append(new_numset)
    return string_matrix

matrix = [['#1010', '#35', '#2', '#145', '#8888'], ['#30', '#2'], ['#65422', '#1', '#13331', '#77', '#101', '#8888']]
file_dict = file_to_dict('text.txt')
string_matrix = extended_strings(matrix, file_dict)
for list_ in string_matrix:
    for line in list_:
        print(line)
    print()
Thanks for the help, Werner Wenzel.
I've found the solution that works for me and would like to share it here:

import re

def file_to_dict(file_name):
    file_dict = {}
    with open(file_name) as f:
        for line in f:
            stg = re.findall(r"(.+)", line)
            stgNum = re.findall(r"#\d{1,10}", line)
            file_dict[stgNum[0]] = stg[0]
    return file_dict

def extended_strings(matrix, file_dict):
    string_matrix = []
    for numset in matrix:
        new_numset = []
        for num in numset:
            new_numset.append(file_dict[num])
        string_matrix.append(new_numset)
    return string_matrix

matrix = [['#1010', '#35', '#2', '#145', '#8888'], ['#30', '#2'], ['#65422', '#1', '#13331', '#77', '#101', '#8888']]
file_dict = file_to_dict('text.txt')
string_matrix = extended_strings(matrix, file_dict)
for list_ in string_matrix:
    for line in list_:
        print(line)
print("done")

Do something to line and next lines until a symbol is hit

I have data that is set up as follows:
//Name_1 * *
>a xyzxyzyxyzyxzzxy
>b xyxyxyzxyyxzyxyz
>c xyzyxzyxyzyxyzxy
//Name_2
>a xyzxyzyxyzxzyxyx
>b zxyzxyzxyyzxyxzx
>c zxyzxyzxyxyzyzxy
//Name_3 * *
>a xyzxyzyxyzxzyxyz
>b zxyzxyzxzyyzyxyx
>c zxyzxyzxyxyzyzxy
...
The //-line refers to an ID for the following group of sequences until the next //-line is reached.
I have been working on writing a program that reads the positions of the asterisks and prints the characters at those positions for the sequences.
To simplify things for myself, I have been working on a subset of my data containing only one group of sequences, e.g.:
//Name_1 * *
>a xyzxyzyxyzyxzzxy
>b xyxyxyzxyyxzyxyz
>c xyzyxzyxyzyxyzxy
My program does what I want on this subset.
import sys
import csv

datafile = open(sys.argv[1], 'r')
outfile = open(sys.argv[1] + "_FGT_Data", 'w')
csv_out = csv.writer(outfile, delimiter=',')
csv_out.writerow(['Locus', 'Individual', 'Nucleotide', 'Position'])

with (datafile) as searchfile:
    var_line = [line for line in searchfile if '*' in line]
    LocusID = [line[2:13].strip() for line in var_line]
    poslist = [i for line in var_line for i, x in enumerate(line) if x == '*']

datafile = open(sys.argv[1], 'r')
with (datafile) as getsnps:
    lines = [line for line in getsnps.readlines() if line.startswith('>')]
    for pos in poslist:
        for line in lines:
            snp = line[pos]
            individual = line[0:7]
            indistr = individual.strip()
            csv_out.writerow((LocusID[0], indistr, line[pos], str(pos)))

datafile.close()
outfile.close()
However, now I am trying to modify it to work on the full dataset, and I am having trouble finding a way to iterate over the data in the correct way.
I need to search through the file, and when a line containing '*' is reached, I need to do as in the above code for the sequences corresponding to that line, and then continue to the next line containing a '*'. Do I need to split up my data with regard to the //-lines, or what is the best approach?
I have uploaded a sample of my data to dropbox:
Data_Sample.txt contains several groups, and is the kind of data, I am trying to get the program to work on.
Data_One_Group.txt contains only one group, and is the data I have gotten the program to work on so far.
https://www.dropbox.com/sh/3j4i04s2rg6b63h/AADkWG3OcsutTiSsyTl8L2Vda?dl=0
--------EDIT---------
I am trying to implement the suggestion by Julien Spronck below.
However, I am having trouble processing the produced block. How can I search through the block line by line? E.g., why does the below not work as intended? It just prints the asterisks and not the line itself.
block = ''
with open('onelocus.txt', 'r') as searchfile:
    for line in searchfile:
        if line.startswith('//'):
            #print(line)
            if block:
                for line in block:
                    if '*' in line:
                        print(line)
            block = line
        else:
            block += line
---------EDIT 2----------
I am getting closer. I understand that I need to split the string into lines to be able to search through them. The below works on one group, but when I try to iterate over several, it prints the information for the first group only, although it does so as many times as there are groups. I have tried clearing LocusID and poslist before the next iteration, but this does not seem to be the solution.
block = ''
with (datafile) as searchfile:
    for line in searchfile:
        if line.startswith('//'):
            if block:
                var_line = [line for line in block.splitlines() if '*' in line]
                LocusID = [line[2:13].strip() for line in var_line]
                print(LocusID)
                poslist = [i for line in var_line for i, x in enumerate(line) if x == '*']
                print(poslist)
            block = line
        else:
            block += line
Can't you do something like:

block = ''
with open(filename, 'r') as fil:
    for line in fil:
        if line.startswith('//'):
            if block:
                do_something_with(block)
            block = line
        else:
            block += line

if block:
    do_something_with(block)
In this code, I just append the lines of the file to a variable block. Once I find a line that starts with //, I process the previous block and reinitialize the block for the next iteration.
The last two lines will take care of processing the last block, which would not be processed otherwise.
do_something_with(block) could be something like this:

def do_something_with(block):
    lines = block.splitlines()
    j = 0
    first_line = lines[j]
    while first_line.strip() == '':
        j += 1
        first_line = lines[j]
    pos = []
    position = first_line.find('*')
    while position != -1:
        pos.append(position)
        position = first_line.find('*', position + 1)
    for k, line in enumerate(lines):
        if k > j:
            for p in pos:
                print(line[p], end=' ')
            print()

## prints
## z y
## x z
## z y
I have created a way to make this work with the data you provided.
You should run it with two file locations: the first should be your input.txt and the second your output.csv.
explanation
First we create a dictionary with the locus as key and the sequences as values.
We iterate over this dictionary, get the * locations in the locus line, and append these to a list, indexes.
We iterate over the values belonging to this key and extract the sequence.
Per iteration we iterate over indexes so that we gather the SNPs.
Per iteration we append to our csv file.
We empty the indexes list so we can go to the next key.
Keep in mind
This method is highly dependent on the amount of spaces you have inside your input.txt.
You should know that this will not be the fastest way to get it done, but it does get it done.
I hope this helped. If you have any questions, feel free to ask them, and if I have time, I will happily try to answer them.
script

import sys
import csv

sequences = []
dic = {}
indexes = []
datafile = sys.argv[1]
outfile = sys.argv[2]

with open(datafile, 'r') as snp_file:
    lines = snp_file.readlines()

for i in range(0, len(lines)):
    if lines[i].startswith("//"):
        sequences = []  # fresh list for this locus, so each key keeps its own sequences
        dic[lines[i].rstrip()] = sequences
    if lines[i].startswith(">"):
        sequences.append(lines[i].rstrip())

for key in dic:
    locus = key.split(" ")[0].replace("//", "")
    for i, x in enumerate(key):
        if x == '*':
            indexes.append(i - 11)
    for sequence in dic[key]:
        seq = sequence.split(" ")[1]
        seq_id = sequence.split(" ")[0].replace(">", "")
        for z in indexes:
            position = z + 1
            nucleotide = seq[z]
            with open(outfile, 'a') as handle:
                csv_out = csv.writer(handle, delimiter=',')
                csv_out.writerow([locus, seq_id, position, nucleotide])
    del indexes[:]
input.txt
//Locus_1 * *
>Safr01 AATCCGTTTTAAACCAGNTCYAT
>Safr02 TTAATCCGTTTTAAACCAGNTCY
//Locus_2 * *
>Safr01 AATCCGTTTTAAACCAGNTCYAT
>Safr02 TTAATCCGTTTTAAACCAGNTCY
output.csv
Locus_1,Safr01,1,A
Locus_1,Safr01,22,A
Locus_1,Safr02,1,T
Locus_1,Safr02,22,C
Locus_2,Safr01,5,C
Locus_2,Safr01,19,T
Locus_2,Safr02,5,T
Locus_2,Safr02,19,G
This is how I ended up solving the problem:

def do_something_with(block):
    lines = block.splitlines()
    for line in lines:
        if '*' in line:
            hit = line
            LocusID = hit[2:13].strip()
            for i, x in enumerate(hit):
                if x == '*':
                    poslist.append(i)
    for pos in poslist:
        for line in lines:
            if line.startswith('>'):
                individual = line[0:7].strip()
                snp = line[pos]
                print(LocusID, individual, snp, pos, end=' ')
                csv_out.writerow((LocusID, individual, snp, pos))
with (datafile) as searchfile:
    for line in searchfile:
        if line.startswith('//'):
            if block:
                do_something_with(block)
                poslist = list()
            block = line
        else:
            block += line

if block:
    do_something_with(block)
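The snippet above relies on names defined earlier in the question (datafile, csv_out, block, poslist). A minimal setup sketch, reusing the code from the top of the question, would be:

import sys
import csv

datafile = open(sys.argv[1], 'r')
outfile = open(sys.argv[1] + "_FGT_Data", 'w')
csv_out = csv.writer(outfile, delimiter=',')
csv_out.writerow(['Locus', 'Individual', 'Nucleotide', 'Position'])

block = ''        # accumulator for the current //-group
poslist = list()  # '*' positions for the current group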

Extract from current position until end of file

I want to pull all data from a text file, from a specified line number until the end of the file. This is what I've tried:
def extract_values(f):
    line_offset = []
    offset = 0
    last_line_of_heading = False
    if not last_line_of_heading:
        for line in f:
            line_offset.append(offset)
            offset += len(line)
            if whatever_condition:
                last_line_of_heading = True
    f.seek(0)
    # non-functioning pseudocode follows
    data = f[offset:]  # read from current offset to end of file into this variable
There is actually a blank line between the header and the data I want, so ideally I could skip this also.
Do you know the line number in advance? If so:

def extract_values(f):
    line_number = # something
    data = f.readlines()[line_number:]

If not, and you need to determine the line number based on the content of the file itself:

def extract_values(f):
    lines = f.readlines()
    for line_number, line in enumerate(lines):
        if some_condition(line):
            data = lines[line_number:]
            break
This will not be ideal if your files are enormous (since the lines of the file are loaded into memory); in that case, you might want to do it in two passes, only storing the file data on the second pass.
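A rough sketch of that two-pass idea, assuming some_condition marks the last heading line as above (it also skips the blank line between the heading and the data):

def extract_values(f):
    # Pass 1: find the line number of the last heading line, without storing lines.
    heading_end = None
    for line_number, line in enumerate(f):
        if some_condition(line):
            heading_end = line_number
            break
    if heading_end is None:
        return []
    # Pass 2: keep only the non-blank lines after the heading.
    f.seek(0)
    return [line for line_number, line in enumerate(f)
            if line_number > heading_end and line.strip()]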
Your if clause is at the wrong position:

for line in f:
    if not last_line_of_heading:

Consider this code:

def extract_values(f):
    rows = []
    last_line_of_heading = False
    for line in f:
        if last_line_of_heading:
            rows.append(line)
        elif whatever_condition:
            last_line_of_heading = True
    # if you want a string instead of an array of lines
    # (each line already ends with "\n", so join with an empty string):
    data = "".join(rows)
You can use enumerate:

f = open('your_file')
for i, x in enumerate(f):
    if i >= your_line:
        # do your stuff

Here i stores the line number (starting from 0) and x contains the line.

Using a list comprehension:

[x for i, x in enumerate(f) if i >= your_line]

will give you a list of the lines after the specified line.

Using a dictionary comprehension:

{i: x for i, x in enumerate(f) if i >= your_line}

will give you the line number as key and the line as value, from the specified line number on. (If you run more than one of these on the same file object, remember that f is an iterator: reopen the file or call f.seek(0) first.)
Try this small Python program, LastLines.py:

import sys

def main():
    firstLine = int(sys.argv[1])
    lines = sys.stdin.read().splitlines()[firstLine:]
    for curLine in lines:
        print(curLine)

if __name__ == "__main__":
    main()

Example input, test1.txt:

a
b
c
d

Example usage:

python LastLines.py 2 < test1.txt

Example output:

c
d

This program assumes that the first line in a file is the 0th line.

Splitting large text file into smaller text files by line numbers using Python

I have a text file say really_big_file.txt that contains:
line 1
line 2
line 3
line 4
...
line 99999
line 100000
I would like to write a Python script that divides really_big_file.txt into smaller files with 300 lines each. For example, small_file_300.txt would have lines 1-300, small_file_600.txt would have lines 301-600, and so on, until there are enough small files to contain all the lines from the big file.
I would appreciate any suggestions on the easiest way to accomplish this using Python.
lines_per_file = 300
smallfile = None
with open('really_big_file.txt') as bigfile:
    for lineno, line in enumerate(bigfile):
        if lineno % lines_per_file == 0:
            if smallfile:
                smallfile.close()
            small_filename = 'small_file_{}.txt'.format(lineno + lines_per_file)
            smallfile = open(small_filename, "w")
        smallfile.write(line)
if smallfile:
    smallfile.close()
Using the itertools grouper recipe:

from itertools import zip_longest

def grouper(n, iterable, fillvalue=None):
    "Collect data into fixed-length chunks or blocks"
    # grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)

n = 300
with open('really_big_file.txt') as f:
    for i, g in enumerate(grouper(n, f, fillvalue=''), 1):
        with open('small_file_{0}'.format(i * n), 'w') as fout:
            fout.writelines(g)
The advantage of this method as opposed to storing each line in a list, is that it works with iterables, line by line, so it doesn't have to store each small_file into memory at once.
Note that the last file in this case will be small_file_100200 but will only go until line 100000. This happens because fillvalue='', meaning I write out nothing to the file when I don't have any more lines left to write because a group size doesn't divide equally. You can fix this by writing to a temp file and then renaming it after instead of naming it first like I have. Here's how that can be done.
import os, tempfile

with open('really_big_file.txt') as f:
    for i, g in enumerate(grouper(n, f, fillvalue=None)):
        with tempfile.NamedTemporaryFile('w', delete=False) as fout:
            for j, line in enumerate(g, 1):  # count number of lines in group
                if line is None:
                    j -= 1  # don't count this line
                    break
                fout.write(line)
        os.rename(fout.name, 'small_file_{0}.txt'.format(i * n + j))
This time fillvalue=None, and I go through each line checking for None; when it occurs, I know the process has finished, so I subtract 1 from j (to not count the filler) and then write the file.
I'll do this in a more step-by-step way, using fewer shortcuts, to give you a better understanding of how and why this works.
Since you posted no code, I'll assume you are only familiar with basic Python syntax; the way you phrased the question suggests you had not yet worked out how to approach it.
Here are the steps to do this in basic python:
First you should read your file into a list for safekeeping:
my_file = 'really_big_file.txt'
hold_lines = []
with open(my_file, 'r') as text_file:
    for row in text_file:
        hold_lines.append(row)
Second, you need to set up a way of creating the new files by name! I would suggest a loop along with a couple counters:
outer_count = 1
line_count = 0
sorting = True
while sorting:
    count = 0
    increment = (outer_count - 1) * 300
    left = len(hold_lines) - increment
    file_name = "small_file_" + str(outer_count * 300) + ".txt"
Third, inside that loop you need some nested loops that will save the correct rows into an array:
hold_new_lines = []
if left < 300:
    while count < left:
        hold_new_lines.append(hold_lines[line_count])
        count += 1
        line_count += 1
    sorting = False
else:
    while count < 300:
        hold_new_lines.append(hold_lines[line_count])
        count += 1
        line_count += 1
Lastly, still inside your first loop, you need to write the new file and increment your outer counter so the loop will go around again and write the next file:
outer_count += 1
with open(file_name, 'w') as next_file:
    for row in hold_new_lines:
        next_file.write(row)
note: if the number of lines is not divisible by 300, the last file will have a name that does not correspond to the last file line.
It is important to understand why these loops work. You have it set so that on the next loop, the name of the file that you write changes because you have the name dependent on a changing variable. This is a very useful scripting tool for file accessing, opening, writing, organizing etc.
In case you could not follow what was in what loop, here is the entirety of the function:
my_file = 'really_big_file.txt'
sorting = True
hold_lines = []
with open(my_file, 'r') as text_file:
    for row in text_file:
        hold_lines.append(row)
outer_count = 1
line_count = 0
while sorting:
    count = 0
    increment = (outer_count - 1) * 300
    left = len(hold_lines) - increment
    file_name = "small_file_" + str(outer_count * 300) + ".txt"
    hold_new_lines = []
    if left < 300:
        while count < left:
            hold_new_lines.append(hold_lines[line_count])
            count += 1
            line_count += 1
        sorting = False
    else:
        while count < 300:
            hold_new_lines.append(hold_lines[line_count])
            count += 1
            line_count += 1
    outer_count += 1
    with open(file_name, 'w') as next_file:
        for row in hold_new_lines:
            next_file.write(row)
lines_per_file = 300  # Lines on each small file
lines = []            # Stores lines not yet written on a small file
lines_counter = 0     # Same as len(lines)
created_files = 0     # Counting how many small files have been created

with open('really_big_file.txt') as big_file:
    for line in big_file:  # Go through the whole big file
        lines.append(line)
        lines_counter += 1
        if lines_counter == lines_per_file:
            idx = lines_per_file * (created_files + 1)
            with open('small_file_%s.txt' % idx, 'w') as small_file:
                # Write all lines on small file (each line already ends with '\n')
                small_file.write(''.join(lines))
            lines = []  # Reset variables
            lines_counter = 0
            created_files += 1  # One more small file has been created

# After for-loop has finished
if lines_counter:  # There are still some lines not written on a file?
    idx = lines_per_file * (created_files + 1)
    with open('small_file_%s.txt' % idx, 'w') as small_file:
        # Write them on a last small file
        small_file.write(''.join(lines))
    created_files += 1

print('%s small files (with %s lines each) were created.' % (created_files,
                                                              lines_per_file))
import csv
import os
import re

MAX_CHUNKS = 300

def writeRow(idr, row):
    with open("file_%d.csv" % idr, 'ab') as file:
        writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)
        writer.writerow(row)

def cleanup():
    for f in os.listdir("."):
        if re.search("file_.*", f):
            os.remove(os.path.join(".", f))

def main():
    cleanup()
    with open("large_file.csv", 'rb') as results:
        r = csv.reader(results, delimiter=',', quotechar='\"')
        idr = 1
        for i, x in enumerate(r):
            temp = i + 1
            if not (temp % (MAX_CHUNKS + 1)):
                idr += 1
            writeRow(idr, x)

if __name__ == "__main__":
    main()
with open('/really_big_file.txt') as infile:
    file_line_limit = 300
    counter = -1
    file_index = 0
    outfile = None
    for line in infile.readlines():
        counter += 1
        if counter % file_line_limit == 0:
            # close old file
            if outfile is not None:
                outfile.close()
            # create new file
            file_index += 1
            outfile = open('small_file_%03d.txt' % file_index, 'w')
        # write to file
        outfile.write(line)
I had to do the same with files of 650,000 lines.
Use the enumerate index and integer-divide it (//) by the chunk size.
When that number changes, close the current file and open a new one.
This is a Python 3 solution using format strings.
chunk = 50000  # number of lines from the big file to put in each small file
this_small_file = open('./a_folder/0', 'a')

with open('massive_web_log_file') as file_to_read:
    for i, line in enumerate(file_to_read.readlines()):
        file_name = f'./a_folder/{i // chunk}'
        print(i, file_name)  # a bit of feedback (this slows the process down)
        if file_name == this_small_file.name:
            this_small_file.write(line)
        else:
            this_small_file.close()
            this_small_file = open(f'{file_name}', 'a')
            this_small_file.write(line)

this_small_file.close()  # close the last chunk file
Set files to the number of files you want to split the master file into; in my example I want to get 10 files from my master file.

files = 10
with open("data.txt", "r") as data:
    emails = data.readlines()
    batchs = int(len(emails) / files)
    for id, log in enumerate(emails):
        fileid = id / batchs
        file = open("minifile{file}.txt".format(file=int(fileid) + 1), 'a+')
        file.write(log)
        file.close()
A very easy way, if you want to split it into 2 files for example:

with open("myInputFile.txt", 'r') as file:
    lines = file.readlines()

with open("OutputFile1.txt", 'w') as file:
    for line in lines[:int(len(lines)/2)]:
        file.write(line)

with open("OutputFile2.txt", 'w') as file:
    for line in lines[int(len(lines)/2):]:
        file.write(line)

Making that dynamic would be:

with open("inputFile.txt", 'r') as file:
    lines = file.readlines()

Batch = 10
end = 0
for i in range(1, Batch + 1):
    if i == 1:
        start = 0
    increase = int(len(lines) / Batch)
    end = end + increase
    if i == Batch:
        end = len(lines)  # make sure the last file gets any remainder lines
    with open("splitText_" + str(i) + ".txt", 'w') as file:
        for line in lines[start:end]:
            file.write(line)
    start = end
In Python, file objects are simple iterators. That gives us the option to iterate over them multiple times and always continue from the place the previous iteration stopped. Keeping this in mind, we can use islice to get the next 300 lines of the file each time in a continuous loop. The tricky part is knowing when to stop. For this we will "sample" the file for the next line, and once it is exhausted we can break the loop:

from itertools import islice

lines_per_file = 300
with open("really_big_file.txt") as file:
    i = 1
    while True:
        try:
            checker = next(file)
        except StopIteration:
            break
        with open(f"small_file_{i*lines_per_file}.txt", 'w') as out_file:
            out_file.write(checker)
            for line in islice(file, lines_per_file - 1):
                out_file.write(line)
        i += 1
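A quick way to sanity-check the result afterwards (assuming the chunk files were written to the current directory with the names used above):

import glob

# List the generated chunk files in numeric order of their suffix.
chunks = sorted(glob.glob("small_file_*.txt"),
                key=lambda name: int(name.split('_')[-1].split('.')[0]))
print(chunks)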
