How to optimize binary file manipulation? - python

here is my code:
def decode(filename):
    with open(filename, "rb") as binary_file:
        # Read the whole file at once
        data = bytearray(binary_file.read())
    for i in range(len(data)):
        data[i] = 0xff - data[i]
    with open("out.log", "wb") as out:
        out.write(data)
I have a file of around 10 MB, and I need to translate it by flipping every bit and saving the result to a new file.
It takes around 1 second to translate a 10 MB file using my code, while it takes less than 1 ms using C.
This is my first Python script. I don't know whether it is right to use bytearray. The most time-consuming part of the code is the loop over the bytearray.

If using the numpy library is an option, then using it would be much★ faster, since it can perform the operation on all the bytes with a single statement. Doing byte-level operations in pure Python on a relatively large amount of data is inherently slow compared with using a module like numpy, which is implemented in C and optimized for array processing.
★ Although not by quite as much in Python 2 as in 3 (see results below).
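Stripped of the benchmarking scaffolding below, the core of the numpy approach is just a few lines (a minimal sketch; the file names here are placeholders):
import numpy as np

def decode(filename, out_filename):
    with open(filename, 'rb') as file:
        data = np.frombuffer(file.read(), dtype=np.uint8)
    # One vectorized statement inverts every byte.
    data = 0xff - data
    with open(out_filename, 'wb') as out:
        out.write(data)

decode('in.bin', 'out.bin')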
The following is a framework I set up to benchmark using it vs the code in your question. It may seem like a lot of code, but most of it is just part of the scaffolding for making performance comparisons.
I encourage others answering this question to also make use of it.
from __future__ import print_function
from collections import namedtuple
import os
import sys
from random import randrange
from textwrap import dedent
from tempfile import NamedTemporaryFile
import timeit
import traceback

N = 1  # Number of executions of each "algorithm".
R = 3  # Number of repetitions of those N executions.

UNITS = 1024 * 1024  # MBs
FILE_SIZE = 10 * UNITS

# Create test files. Must be done here at module-level to allow file
# deletions at end.
with NamedTemporaryFile(mode='wb', delete=False) as inp_file:
    FILE_NAME_IN = inp_file.name
    print('Creating temp input file: "{}", length {:,d}'.format(FILE_NAME_IN, FILE_SIZE))
    inp_file.write(bytearray(randrange(256) for _ in range(FILE_SIZE)))

with NamedTemporaryFile(mode='wb', delete=False) as out_file:
    FILE_NAME_OUT = out_file.name
    print('Creating temp output file: "{}"'.format(FILE_NAME_OUT))

# Common setup for all testcases (executed prior to any Testcase specific setup).
COMMON_SETUP = dedent("""
    from __main__ import FILE_NAME_IN, FILE_NAME_OUT
""")

class Testcase(namedtuple('CodeFragments', ['setup', 'test'])):
    """ A test case is composed of separate setup and test code fragments. """
    def __new__(cls, setup, test):
        """ Dedent code fragment in each string argument. """
        return tuple.__new__(cls, (dedent(setup), dedent(test)))

testcases = {
    "user3181169": Testcase("""
        def decode(filename, out_filename):
            with open(filename, "rb") as binary_file:
                # Read the whole file at once
                data = bytearray(binary_file.read())

            for i in range(len(data)):
                data[i] = 0xff - data[i]

            with open(out_filename, "wb") as out:
                out.write(data)
        """, """
        decode(FILE_NAME_IN, FILE_NAME_OUT)
        """
    ),

    "using numpy": Testcase("""
        import numpy as np

        def decode(filename, out_filename):
            with open(filename, 'rb') as file:
                data = np.frombuffer(file.read(), dtype=np.uint8)
                # Applies mathematical operation to entire array.
                data = 0xff - data

            with open(out_filename, "wb") as out:
                out.write(data)
        """, """
        decode(FILE_NAME_IN, FILE_NAME_OUT)
        """,
    ),
}

# Collect timing results of executing each testcase multiple times.
try:
    results = [
        (label,
         min(timeit.repeat(testcases[label].test,
                           setup=COMMON_SETUP + testcases[label].setup,
                           repeat=R, number=N)),
        ) for label in testcases
    ]
except Exception:
    traceback.print_exc(file=sys.stdout)  # direct output to stdout
    sys.exit(1)

# Display results.
major, minor, micro = sys.version_info[:3]
bitness = 64 if sys.maxsize > 2**32 else 32
print('Fastest to slowest execution speeds using ({}-bit) Python {}.{}.{}\n'
      '({:,d} execution(s), best of {:d} repetition(s)'.format(
          bitness, major, minor, micro, N, R))
print()

longest = max(len(result[0]) for result in results)  # length of longest label
ranked = sorted(results, key=lambda t: t[1])  # ascending sort by execution time
fastest = ranked[0][1]

for result in ranked:
    print('{:>{width}} : {:9.6f} secs, relative speed: {:6,.2f}x, ({:8,.2f}% slower)'
          ''.format(
              result[0], result[1], round(result[1]/fastest, 2),
              round((result[1]/fastest - 1) * 100, 2),
              width=longest))

# Clean-up.
for filename in (FILE_NAME_IN, FILE_NAME_OUT):
    try:
        os.remove(filename)
    except FileNotFoundError:
        pass
Output (Python 3):
Creating temp input file: "T:\temp\tmpw94xdd5i", length 10,485,760
Creating temp output file: "T:\temp\tmpraw4j4qd"
Fastest to slowest execution speeds using (32-bit) Python 3.7.1
(1 execution(s), best of 3 repetition(s)
using numpy : 0.017744 secs, relative speed: 1.00x, ( 0.00% slower)
user3181169 : 1.099956 secs, relative speed: 61.99x, (6,099.14% slower)
Output (Python 2):
Creating temp input file: "t:\temp\tmprk0njd", length 10,485,760
Creating temp output file: "t:\temp\tmpvcaj6n"
Fastest to slowest execution speeds using (32-bit) Python 2.7.15
(1 execution(s), best of 3 repetition(s)
using numpy : 0.017930 secs, relative speed: 1.00x, ( 0.00% slower)
user3181169 : 0.937218 secs, relative speed: 52.27x, (5,126.97% slower)

Related

Why does concatenation in Python appear to be getting slower?

Why does it appear that concatenation in Python 3 is slower in some cases than in Python 2?
The most impacted method of concatenation appears to be successive concatenation of bytes objects, which has gone from an O(n) to O(n²) operation.
The bulk of my profiling code is here:
#!/usr/bin/env python
from operator import concat
from sys import version, version_info
from timeit import timeit  # Compatibility: ver >= 2.6

# ver = version.partition('\n')[0].rstrip()
ver = '.'.join(str(v) for v in version_info[:3])
print(ver)

if version_info[0] == 2:
    from StringIO import StringIO
else:
    from io import StringIO
    from functools import reduce
    xrange = range


def build_plus():
    output = ''
    for _ in xrange(input_len):
        output += 'a'
    return output


def build_join():
    return ''.join('a' for _ in xrange(input_len))


def build_bytes_plus():
    output = b''
    for _ in xrange(input_len):
        output += b'a'
    return output


def build_stringio():
    output = StringIO()
    for _ in xrange(input_len):
        output.write('a')
    return output.getvalue()


def build_reduce():
    return reduce(concat, ('a' for _ in xrange(input_len)))


builds = {'str+': build_plus,
          'join': build_join,
          'reduce': build_reduce,
          'bytes+': build_bytes_plus,
          'StringIO': build_stringio}

if version_info[0] == 2:
    import cStringIO

    def build_cstringio():
        output = cStringIO.StringIO()
        for _ in xrange(input_len):
            output.write('a')
        return output.getvalue()

    builds['cStringIO'] = build_cstringio
else:
    from io import BytesIO

    def build_bytesio():
        output = BytesIO()
        for _ in xrange(input_len):
            output.write(b'a')
        return output.getvalue()

    builds['BytesIO'] = build_bytesio

resfile = open('times.csv', 'a')
size_range = 50   # Number of points over the size axis
min_order = 1.0   # 10^x byte input min
max_order = 5.0   # 10^x byte input max

for allow_gc in (False, True):
    setup = 'gc.enable()' if allow_gc else 'pass'
    for build_name, build_fun in builds.items():
        # For a roughly constant confidence interval, aim for uniform sample density across the
        # (logarithmic) input size axis.
        for size_index in range(size_range + 1):
            input_len = int(10**((max_order - min_order) * size_index / size_range + min_order))
            # Rather than repeating many measurements at one input size, perform one measurement
            # per input size for a continuous range of input sizes and apply smoothing later.
            dur = timeit(build_fun, setup, number=1)
            resfile.write('"%s",%s,"%s",%d,%.6g\n' % (ver, str(allow_gc).upper(), build_name,
                                                      input_len, dur))
Some graphs from my R script are shown here (images not included in this text).
Concatenating strings with + or += in a loop was never a good idea. It only seemed efficient because there was a weird, controversial special case in the bytecode interpreter loop which would attempt to concatenate strings mutatively if it could prove no one else had a reference to the string it was messing with. There was no efficient resize policy in place; it just called realloc and hoped for the best, so it could still end up O(n^2) if realloc needed to copy.
In Python 3, that weird special case now handles unicode strings instead of bytestrings. Bytestring concatenation goes back to building a new string object each time, so your loop goes back to O(n^2).
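In practice the remedy is the same as it has always been for text: collect the pieces and join them once, or append to a mutable buffer. A brief sketch of both patterns (illustrative only, not part of the original answer):
from io import BytesIO

n = 100000

# Build the pieces first, then join once: O(n) total.
joined = b''.join(b'a' for _ in range(n))

# Or append to a mutable buffer instead of an immutable bytes object.
buf = bytearray()
for _ in range(n):
    buf += b'a'          # in-place extension of the bytearray
result = bytes(buf)

# BytesIO behaves similarly and doubles as a file-like object.
stream = BytesIO()
for _ in range(n):
    stream.write(b'a')
stream_result = stream.getvalue()

assert joined == result == stream_result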

How to improve performance when having a huge list of bytes to be written to file?

In Python, I have a huge list of floating-point values (nearly 30 million values). I have to convert each of them to a 4-byte value in little-endian format and write them all to a binary file in order.
For a list of some thousands or even 100k values, my code works fine. But if the data increases, it takes a long time to process and write to the file. What optimization techniques can I use to write to the file more efficiently?
As suggested in this blog, I am replacing all the small writes to the file with the use of a bytearray. But still, the performance is not satisfactory.
I have also tried multiprocessing (concurrent.futures.ProcessPoolExecutor()) to utilize all the cores in the system instead of a single CPU core, but it still takes more time to complete.
Can anyone give me more suggestions on how to improve the performance (in terms of time and memory) for this problem?
Here is my code:
import struct

def process_value(value):
    # Reinterpret the float's bits as an unsigned int, then pull out the four bytes.
    hex_value = hex(struct.unpack('<I', struct.pack('<f', value))[0])
    if len(hex_value.split('x')[1]) < 8:
        hex_value = hex_value[:2] + ('0' * (8 - len(hex_value.split('x')[1]))) + hex_value[2:]
    dec1 = int(hex_value.split('x')[1][0] + hex_value.split('x')[1][1], 16)
    dec2 = int(hex_value.split('x')[1][2] + hex_value.split('x')[1][3], 16)
    dec3 = int(hex_value.split('x')[1][4] + hex_value.split('x')[1][5], 16)
    dec4 = int(hex_value.split('x')[1][6] + hex_value.split('x')[1][7], 16)
    msg = bytearray([dec4, dec3, dec2, dec1])
    return msg

def main_function(fp, values):
    msg = bytearray()
    for val in values:
        msg.extend(process_value(val))
    fp.write(msg)
You could try converting all the floats before writing them, and then write the resulting data in one go:
import struct

my_floats = [1.111, 1.222, 1.333, 1.444]

with open('floats.bin', 'wb') as f_output:
    f_output.write(struct.pack('<{}f'.format(len(my_floats)), *my_floats))
For the amount of values you have, you might need to do this in large blocks:
import struct

def blocks(data, n):
    for i in xrange(0, len(data), n):
        yield data[i:i+n]

my_floats = [1.111, 1.222, 1.333, 1.444]

with open('floats.bin', 'wb') as f_output:
    for block in blocks(my_floats, 10000):
        f_output.write(struct.pack('<{}f'.format(len(block)), *block))
The output from struct.pack() is already in the correct binary format for writing directly to the file, which must be opened in binary mode (hence wb).
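If numpy is available, the whole conversion can also be expressed as a single array cast and written in one call; this is an alternative sketch, not part of the original answer (the '<f4' dtype forces 4-byte little-endian floats):
import numpy as np

my_floats = [1.111, 1.222, 1.333, 1.444]

# '<f4' = little-endian 4-byte float; tofile() writes the raw bytes directly.
np.asarray(my_floats, dtype='<f4').tofile('floats.bin')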

Processing a huge amount of files in python

I have a huge number of report files (about 650 files) which take up about 320 MB of disk space, and I want to process them. There are a lot of entries in each file; I have to count and log them based on their content. Some of them are related to each other, and I have to find, log, and count those too; matches may be in different files. I have written a simple script to do the job. Using the Python profiler, it takes only about 0.3 seconds to run the script on a single file with 2000 lines, half of which we need for processing. But for the whole directory it takes an hour and a half to finish. This is what my script looks like:
# imports

class Parser(object):
    def __init__(self):
        # load some configurations
        # open some log files
        # set some initial values for some variables

    def parse_packet(self, tags):
        # extract some values from line

    def found_matched(self, packet):
        # search in the related list to find matched line

    def save_packet(self, packet):
        # write the line in the appropriate files and increase or decrease some counters

    def parse(self, file_addr):
        lines = [l for index, l in enumerate(open(file_addr, 'r').readlines()) if index % 2 != 0]
        for line in lines:
            packet = parse_packet(line)
            if found_matched(packet):
                # count
                self.save_packet(packet)

    def process_files(self):
        if not os.path.isdir(self.src_dir):
            self.log('No such file or directory: ' + str(self.src_dir))
            sys.exit(1)
        input_dirs = os.walk(self.src_dir)
        for dname in input_dirs:
            file_list = dname[2]
            for fname in file_list:
                self.parse(os.path.join(dname[0], fname))
        self.finalize_process()

    def finalize_process(self):
        # closing files
I want to decrease the time to at most 10% of the current execution time. Maybe multiprocessing can help me, or maybe some enhancement to the current script will do the task. In any case, could you please help me with this?
Edit 1:
I have changed my code according to @Reut Sharabani's answer:
def parse(self, file_addr):
    lines = [l for index, l in enumerate(open(file_addr, 'r').readlines()) if index % 2 != 0]
    for line in lines:
        packet = parse_packet(line)
        if found_matched(packet):
            # count
            self.save_packet(packet)

def process_files(self):
    if not os.path.isdir(self.src_dir):
        self.log('No such file or directory: ' + str(self.src_dir))
        sys.exit(1)
    input_dirs = os.walk(self.src_dir)
    for dname in input_dirs:
        process_pool = multiprocessing.Pool(10)
        for fname in file_list:
            file_list = [os.path.join(dname[0], fname) for fname in dname[2]]
            process_pool.map(self.parse, file_list)
    self.finalize_process()
I also added the lines below before my class definition to avoid PicklingError: Can't pickle <type 'instancemethod'>: attribute lookup __builtin__.instancemethod failed:
import copy_reg
import types

def _pickle_method(m):
    if m.im_self is None:
        return getattr, (m.im_class, m.im_func.func_name)
    else:
        return getattr, (m.im_self, m.im_func.func_name)

copy_reg.pickle(types.MethodType, _pickle_method)
Another change I made to my code was not to keep the log files open during file processing; I open and close them for writing each entry, just to avoid ValueError: I/O operation on closed file.
Now the problem is that some files are being processed multiple times, and I also get wrong counts for my packets. What did I do wrong? Should I put process_pool = multiprocessing.Pool(10) before the for loop? Consider that I have just one directory right now, and it doesn't seem to be the problem.
EDIT 2:
I also tried using ThreadPoolExecutor this way:
with ThreadPoolExecutor(max_workers=10) as executor:
    for fname in file_list:
        executor.submit(self.parse, fname)
Results were correct, but it took an hour and a half to be completed.
First of all, "about 650 files which take about 320 MB" is not a lot. Given that modern hard disks easily read and write 100 MB/s, the I/O performance of your system is probably not your bottleneck (this is also supported by "it just took about 0.3 seconds to run the script for one single file with 2000 lines", which clearly indicates CPU limitation). However, the exact way you are reading files from within Python may not be efficient.
Furthermore, a simple multiprocessing-based architecture, run on a common multi-core system, will allow you to perform your analysis much faster (no need to involve celery here, no need to cross machine boundaries).
multiprocessing architecture
Just have a look at multiprocessing: your architecture will likely involve one manager process (the parent), which defines a task Queue, and a Pool of worker processes. The manager (or feeder) puts tasks (e.g. file names) into the queue, and the workers consume them. After finishing a task, a worker lets the manager know and proceeds to consume the next one.
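A minimal sketch of that layout, with a standalone parse_one_file function standing in for the Parser.parse method (the names and the directory here are placeholders, not from the question):
import multiprocessing
import os


def parse_one_file(file_addr):
    """Placeholder worker: scan one file and return something countable."""
    matched = 0
    with open(file_addr, 'r') as f:
        for index, line in enumerate(f):
            if index % 2 != 0:      # only the odd lines are of interest
                matched += 1        # real code would parse and match the packet here
    return file_addr, matched


if __name__ == '__main__':
    src_dir = 'reports'             # placeholder directory
    file_list = [os.path.join(dirpath, fname)
                 for dirpath, _, fnames in os.walk(src_dir)
                 for fname in fnames]

    # The Pool is the "manager + workers" part: it queues the file names and
    # hands them out to worker processes as they become free.
    pool = multiprocessing.Pool()   # defaults to one worker per CPU core
    for file_addr, matched in pool.imap_unordered(parse_one_file, file_list):
        print('%s: %d interesting lines' % (file_addr, matched))
    pool.close()
    pool.join()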
file processing method
This is quite inefficient:
lines = [l for index, l in enumerate(open(file_addr, 'r').readlines()) if index % 2 != 0]
for line in lines:
    ...
readlines() reads the entire file before the list comprehension is evaluated, and only after that do you iterate through all the lines again. Hence, you iterate through your data three times. Combine everything into a single loop, so that you iterate over the lines only once.
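One way to get that single pass, shown here as an illustrative sketch (not from the original answer) of a drop-in replacement for the parse method above, is to let itertools skip the even lines while the file is streamed:
from itertools import islice

def parse(self, file_addr):
    with open(file_addr, 'r') as f:
        # islice(f, 1, None, 2) lazily yields lines at indices 1, 3, 5, ...,
        # so the file is streamed once and never fully held in a list.
        for line in islice(f, 1, None, 2):
            packet = self.parse_packet(line)
            if self.found_matched(packet):
                self.save_packet(packet)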
You should be using threads here. If you're blocked by cpu later, you can use processes.
To explain, I first created ten thousand files (0.txt ... 9999.txt), each with a line count equal to its name plus one, using this command:
for i in `seq 0 9999`; do for j in `seq 0 $i`; do echo $i >> $i.txt; done ; done
Next, I created a Python script that uses a ThreadPool with 10 threads to count, across all those files, the lines containing an even value:
#!/usr/bin/env python
from multiprocessing.pool import ThreadPool
import time
import sys

print "creating %s threads" % sys.argv[1]
thread_pool = ThreadPool(int(sys.argv[1]))

files = ["%d.txt" % i for i in range(10000)]

def count_even_value_lines(filename):
    with open(filename, 'r') as f:
        # do some processing
        line_count = 0
        for line in f.readlines():
            if int(line.strip()) % 2 == 0:
                line_count += 1
    print "finished file %s" % filename
    return line_count

start = time.time()
print sum(thread_pool.map(count_even_value_lines, files))
total = time.time() - start
print total
As you can see this takes hardly any time, and the results are correct. Ten files are processed in parallel and the CPU is fast enough to handle the results. If you want even more, you may consider combining threads and processes to utilize all CPUs while not letting I/O block you.
Edit:
As the comments suggest, I was wrong and this is not I/O-bound, so you can speed it up using multiprocessing (it is CPU-bound). Because I used a ThreadPool, which has the same interface as Pool, you can make minimal edits and have the same code running:
#!/usr/bin/env python
import multiprocessing
import time
import sys

files = ["%d.txt" % i for i in range(10000)]

# function has to be defined before pool is opened and workers are forked
def count_even_value_lines(filename):
    with open(filename, 'r') as f:
        # do some processing
        line_count = 0
        for line in f:
            if int(line.strip()) % 2 == 0:
                line_count += 1
    return line_count

print "creating %s processes" % sys.argv[1]
process_pool = multiprocessing.Pool(int(sys.argv[1]))

start = time.time()
print sum(process_pool.map(count_even_value_lines, files))
total = time.time() - start
print total
Results:
me@EliteBook-8470p:~/Desktop/tp$ python tp.py 1
creating 1 processes
25000000
21.2642059326
me@EliteBook-8470p:~/Desktop/tp$ python tp.py 10
creating 10 processes
25000000
12.4360249043
Aside from using parallel processing, your parse method is rather inefficient, as @Jan-PhilipGehrcke already pointed out. To expand on his recommendation, here is the classical variant:
def parse(self, file_addr):
    with open(file_addr, 'r') as f:
        line_no = 0
        for line in f:
            line_no += 1
            if line_no % 2 == 0:  # line_no is 1-based, so this matches index % 2 != 0 above
                packet = parse_packet(line)
                if found_matched(packet):
                    # count
                    self.save_packet(packet)
Or using your style (assuming you use python 3):
def parse(self, file_addr):
    with open(file_addr, 'r') as f:
        filtered = (l for index, l in enumerate(f) if index % 2 != 0)
        for line in filtered:
            # and so on
The thing to notice here is the use of iterators: all the operations used to build filtered (which is not actually a list!) operate on and return iterators, which means that at no point is the entire file loaded into a list.

Fastest way to write large CSV with Python

I want to write random sample data to a CSV file until it is 1 GB big. The following code is working:
import numpy as np
import uuid
import csv
import os

outfile = 'data.csv'
outsize = 1024  # MB
with open(outfile, 'ab') as csvfile:
    wtr = csv.writer(csvfile)
    while (os.path.getsize(outfile)//1024**2) < outsize:
        wtr.writerow(['%s,%.6f,%.6f,%i' % (uuid.uuid4(), np.random.random()*50, np.random.random()*50, np.random.randint(1000))])
How can I make it faster?
The problem appears to be mainly IO-bound. You can improve the I/O a bit by writing to the file in larger chunks instead of writing one line at a time:
import numpy as np
import uuid
import os

outfile = 'data-alt.csv'
outsize = 10  # MB
chunksize = 1000
with open(outfile, 'ab') as csvfile:
    while (os.path.getsize(outfile)//1024**2) < outsize:
        data = [[uuid.uuid4() for i in range(chunksize)],
                np.random.random(chunksize)*50,
                np.random.random(chunksize)*50,
                np.random.randint(1000, size=(chunksize,))]
        csvfile.writelines(['%s,%.6f,%.6f,%i\n' % row for row in zip(*data)])
You can experiment with the chunksize (the number of rows written per chunk) to see what works best on your machine.
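One way to run that experiment, as a rough sketch (not from the original answer; the file names and sizes are arbitrary), is to wrap the chunked writer in a function and time it for a few chunk sizes:
import os
import time
import uuid

import numpy as np


def write_in_chunks(outfile, outsize_mb, chunksize):
    # Same approach as above: build `chunksize` rows at a time, then write them in one call.
    with open(outfile, 'a') as csvfile:
        while (os.path.getsize(outfile) // 1024**2) < outsize_mb:
            data = [[uuid.uuid4() for i in range(chunksize)],
                    np.random.random(chunksize) * 50,
                    np.random.random(chunksize) * 50,
                    np.random.randint(1000, size=(chunksize,))]
            csvfile.writelines(['%s,%.6f,%.6f,%i\n' % row for row in zip(*data)])


for chunksize in (100, 1000, 10000, 100000):
    outfile = 'data-%d.csv' % chunksize
    if os.path.exists(outfile):
        os.remove(outfile)          # start from an empty file for each run
    start = time.time()
    write_in_chunks(outfile, 10, chunksize)
    print('chunksize=%6d: %.2f seconds' % (chunksize, time.time() - start))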
Here is a benchmark, comparing the above code to your original code, with outsize set to 10 MB:
% time original.py
real 0m5.379s
user 0m4.839s
sys 0m0.538s
% time write_in_chunks.py
real 0m4.205s
user 0m3.850s
sys 0m0.351s
So this is about 25% faster than the original code.
PS. I tried replacing the calls to os.path.getsize with an estimation of the number of total lines needed. Unfortunately, it did not improve the speed. Since the number of bytes needed to represent the final int varies, the estimation also is inexact -- that is, it does not perfectly replicate the behavior of your original code. So I left the os.path.getsize in place.
Removing all the unnecessary stuff makes it faster and easier to understand:
import random
import uuid

outfile = 'data.csv'
outsize = 1024 * 1024 * 1024  # 1GB
with open(outfile, 'ab') as csvfile:
    size = 0
    while size < outsize:
        txt = '%s,%.6f,%.6f,%i\n' % (uuid.uuid4(), random.random()*50, random.random()*50, random.randrange(1000))
        size += len(txt)
        csvfile.write(txt)
This is an update building on unutbu's answer above:
A large percentage of the time was spent generating random numbers and checking the file size.
If you generate the rows ahead of time, you can assess the raw disk I/O performance:
import time
from pathlib import Path
import numpy as np
import uuid

outfile = Path('data-alt.csv')
chunksize = 1_800_000

data = [
    [uuid.uuid4() for i in range(chunksize)],
    np.random.random(chunksize) * 50,
    np.random.random(chunksize) * 50,
    np.random.randint(1000, size=(chunksize,))
]
rows = ['%s,%.6f,%.6f,%i\n' % row for row in zip(*data)]

t0 = time.time()
with open(outfile, 'a') as csvfile:
    csvfile.writelines(rows)
tdelta = time.time() - t0
print(tdelta)
On my standard 860 EVO SSD (not NVMe), I get 1.43 sec for 1,800,000 rows, so that's 1,258,741 rows/sec (not too shabby, imo).

Why is loading this file taking so much memory?

I am trying to load a file into Python. It's a very big file (1.5 GB), but I have the memory available and I just want to do this once (hence the use of Python; I only need to sort the file one time, so Python was an easy choice).
My issue is that loading this file results in way too much memory usage. When I've loaded about 10% of the lines into memory, Python is already using 700 MB, which is clearly too much. At around 50% the script hangs, using 3.03 GB of real memory (and slowly rising).
I know this isn't the most efficient method of sorting a file (memory-wise), but I just want it to work so I can move on to more important problems :D So, what is wrong with the following Python code that's causing the massive memory usage?
print 'Loading file into memory'
input_file = open(input_file_name, 'r')
input_file.readline()  # Toss out the header
lines = []
totalLines = 31164015.0
currentLine = 0.0
printEvery100000 = 0
for line in input_file:
    currentLine += 1.0
    lined = line.split('\t')
    printEvery100000 += 1
    if printEvery100000 == 100000:
        print str(currentLine / totalLines)
        printEvery100000 = 0
    lines.append((lined[timestamp_pos].strip(), lined[personID_pos].strip(), lined[x_pos].strip(), lined[y_pos].strip()))
input_file.close()
print 'Done loading file into memory'
EDIT: In case anyone is unsure, the general consensus seems to be that each variable allocated eats up more and more memory. I "fixed" it in this case by 1) calling readlines(), which still loads all the data, but only has the overhead of one string object per line. This loads the entire file using about 1.7 GB. Then, when I call lines.sort(), I pass a function to key that splits on tabs and returns the right column value, converted to an int. This is slow computationally, and memory-intensive overall, but it works. Learned a ton about variable allocation overhead today :D
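For reference, a sketch of what that ends up looking like (my reading of the description above; the column index and input file name are placeholders):
def sort_key(line, timestamp_pos=0):
    # Split on tabs and sort by one column, converted to int.
    return int(line.split('\t')[timestamp_pos])

with open('big_input.tsv', 'r') as input_file:
    input_file.readline()           # toss out the header
    lines = input_file.readlines()  # one str object per line, no per-column overhead
lines.sort(key=sort_key)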
Here is a rough estimate of the memory needed, based on the constants derived from your example. At a minimum you have to figure the Python internal object overhead for each split line, plus the overhead for each string.
It estimates 9.1 GB to store the file in memory, assuming the following constants, which are off by a bit, since you're only using part of each line:
1.5 GB file size
31,164,015 total lines
each line split into a list with 4 pieces
Code:
import sys

def sizeof(lst):
    return sys.getsizeof(lst) + sum(sys.getsizeof(v) for v in lst)

GIG = 1024**3
file_size = 1.5 * GIG
lines = 31164015
num_cols = 4

avg_line_len = int(file_size / float(lines))
val = 'a' * (avg_line_len / num_cols)
lst = [val] * num_cols
line_size = sizeof(lst)

print 'avg line size: %d bytes' % line_size
print 'approx. memory needed: %.1f GB' % ((line_size * lines) / float(GIG))
Returns:
avg line size: 312 bytes
approx. memory needed: 9.1 GB
I don't know about the analysis of the memory usage, but you might try this to get it to work without running out of memory. You'll sort into a new file which is accessed using a memory mapping (I've been led to believe this will work efficiently in terms of memory). Mmap has some OS-specific workings; I tested this on Linux (at a very small scale).
This is the basic code; to make it run with decent time efficiency you'd probably want to do a binary search on the sorted file to find where to insert each line, otherwise it will probably take a long time.
You can find a file-seeking binary search algorithm in this question.
Hopefully this is a memory-efficient way of sorting a massive file by line:
import os
from mmap import mmap

input_file = open('unsorted.txt', 'r')
output_file = open('sorted.txt', 'w+')

# need to provide something in order to be able to mmap the file
# so we'll just copy the first line over
output_file.write(input_file.readline())
output_file.flush()
mm = mmap(output_file.fileno(), os.stat(output_file.name).st_size)
cur_size = mm.size()

for line in input_file:
    mm.seek(0)
    tup = line.split("\t")
    while True:
        cur_loc = mm.tell()
        o_line = mm.readline()
        o_tup = o_line.split("\t")
        if o_line == '' or tup[0] < o_tup[0]:  # EOF or we found our spot
            mm.resize(cur_size + len(line))
            mm[cur_loc + len(line):] = mm[cur_loc:cur_size]
            mm[cur_loc:cur_loc + len(line)] = line
            cur_size += len(line)
            break
