unpack_from requires a buffer of at least 784 bytes - python

I'm running the following function for an ML model.
def get_images(filename):
    bin_file = open(filename, 'rb')
    buf = bin_file.read()  # read the whole file into memory
    bin_file.close()  # release the OS file handle
    index = 0
    magic, num_images, num_rows, num_columns = struct.unpack_from(big_endian + four_bytes, buf, index)
    index += struct.calcsize(big_endian + four_bytes)
    images = []  # temp images as tuples
    for x in range(num_images):
        im = struct.unpack_from(big_endian + picture_bytes, buf, index)
        index += struct.calcsize(big_endian + picture_bytes)
        im = list(im)
        for i in range(len(im)):
            if im[i] > 1:
                im[i] = 1
However, I am receiving an error at the line:
im = struct.unpack_from(big_endian + picture_bytes, buf, index)
With the error:
error: unpack_from requires a buffer of at least 784 bytes
I have noticed this error only occurs at certain iterations. I cannot figure out why this might be the case. The dataset is the standard MNIST dataset, which is freely available online.
I have also looked through similar questions on SO (e.g. error: unpack_from requires a buffer) but they don't seem to resolve the issue.

You didn't include the struct formats in your MRE, so it is hard to say exactly why you are getting the error. Either you are using a partial/corrupted file or your struct formats are wrong.
This answer uses the test file 't10k-images-idx3-ubyte.gz' and file formats found at http://yann.lecun.com/exdb/mnist/
Open the file and read it into a bytes object (gzip is used because of the file's type).
import gzip, struct

with gzip.open(r'my\path\t10k-images-idx3-ubyte.gz', 'rb') as f:
    data = bytes(f.read())
print(len(data))
The file format spec says the header is 16 bytes (four 32-bit ints). Separate it from the pixels with a slice, then unpack it:
hdr,pixels = data[:16],data[16:]
magic, num_images, num_rows, num_cols = struct.unpack(">4L",hdr)
# print(len(hdr),len(pixels))
# print(magic, num_images, num_rows, num_cols)
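If the file really is partial or corrupted, a quick sanity check makes that obvious: the pixel section must hold exactly num_images * num_rows * num_cols bytes.
expected = num_images * num_rows * num_cols
print(len(pixels), expected, len(pixels) == expected)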
There are a number of ways to iterate over the individual images.
img_size = num_rows * num_cols
imgfmt = "B"*img_size
for i in range(num_images):
    start = i * img_size
    end = start + img_size
    img = pixels[start:end]
    img = struct.unpack(imgfmt, img)
    # do work on the img
Or...
imgfmt = "B"*img_size
for img in struct.iter_unpack(imgfmt, pixels):
    img = [p if p == 0 else 1 for p in img]
The itertools grouper recipe would probably also work.
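For reference, a minimal sketch of that recipe applied here (iterating a bytes object yields plain ints, so no struct call is needed):
from itertools import zip_longest

def grouper(iterable, n, fillvalue=None):
    # The itertools "grouper" recipe: collect data into fixed-length chunks.
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

for img in grouper(pixels, img_size, fillvalue=0):
    img = [0 if p == 0 else 1 for p in img]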

Related

struct.error: unpack requires a buffer of 2 bytes

I'm trying to identify the musical note of a sound from a .wav file using Python, but I'm getting the error above when using struct.
I couldn't gather much from the struct documentation or other websites on how to resolve this issue.
I have seen errors like:
struct.error: unpack requires a buffer of 4 bytes
struct.error: unpack requires a buffer of 1024 bytes
but the error seems to be for a different reason.
import numpy as np
import math
import wave
import os
import struct
import matplotlib.pyplot as plt

def note_detect(audio_file):
    #-------------------------------------------
    # here we are just storing our sound file as a numpy array
    # you can also use any other method to store the file as an np array
    file_length = audio_file.getnframes()
    f_s = audio_file.getframerate()  # sampling frequency
    sound = np.zeros(file_length)  # blank array
    for i in range(file_length):
        wdata = audio_file.readframes(1)
        data = struct.unpack("<h", wdata)
        sound[i] = int(data[0])
    plt.plot(sound)
    plt.show()
    sound = np.divide(sound, float(2**15))  # scaling it to 0 - 1
    counter = audio_file.getnchannels()  # number of channels, mono/stereo
    #-------------------------------------------
    plt.plot(sound)
    plt.show()
    # fourier transformation from numpy module
    fourier = np.fft.fft(sound)
    fourier = np.absolute(fourier)
    imax = np.argmax(fourier[0:int(file_length / 2)])  # index of max element
    plt.plot(fourier)
    plt.show()
    # peak detection
    i_begin = -1
    threshold = 0.3 * fourier[imax]
    for i in range(0, imax + 100):
        if fourier[i] >= threshold:
            if i_begin == -1:
                i_begin = i
        if i_begin != -1 and fourier[i] < threshold:
            break
    i_end = i
    imax = np.argmax(fourier[0:i_end + 100])
    freq = (imax * f_s) / (file_length * counter)  # formula to convert index into sound frequency
    # frequency database
    note = 0
    name = np.array(["C0","C#0","D0","D#0","E0","F0","F#0","G0","G#0","A0","A#0","B0","C1","C#1","D1","D#1","E1","F1","F#1","G1","G#1","A1","A#1","B1","C2","C#2","D2","D#2","E2","F2","F#2","G2","G2#","A2","A2#","B2","C3","C3#","D3","D3#","E3","F3","F3#","G3","G3#","A3","A3#","B3","C4","C4#","D4","D4#","E4","F4","F4#","G4","G4#","A4","A4#","B4","C5","C5#","D5","D5#","E5","F5","F5#","G5","G5#","A5","A5#","B5","C6","C6#","D6","D6#","E6","F6","F6#","G6","G6#","A6","A6#","B6","C7","C7#","D7","D7#","E7","F7","F7#","G7","G7#","A7","A7#","B7","C8","C8#","D8","D8#","E8","F8","F8#","G8","G8#","A8","A8#","B8","Beyond B8"])
    frequencies = np.array([16.35, 17.32, 18.35, 19.45, 20.60, 21.83, 23.12, 24.50, 25.96, 27.50, 29.14, 30.87, 32.70, 34.65, 36.71, 38.89, 41.20, 43.65, 46.25, 49.00, 51.91, 55.00, 58.27, 61.74, 65.41, 69.30, 73.42, 77.78, 82.41, 87.31, 92.50, 98.00, 103.83, 110.00, 116.54, 123.47, 130.81, 138.59, 146.83, 155.56, 164.81, 174.61, 185.00, 196.00, 207.65, 220.00, 233.08, 246.94, 261.63, 277.18, 293.66, 311.13, 329.63, 349.23, 369.99, 392.00, 415.30, 440.00, 466.16, 493.88, 523.25, 554.37, 587.33, 622.25, 659.26, 698.46, 739.99, 783.99, 830.61, 880.00, 932.33, 987.77, 1046.50, 1108.73, 1174.66, 1244.51, 1318.51, 1396.91, 1479.98, 1567.98, 1661.22, 1760.00, 1864.66, 1975.53, 2093.00, 2217.46, 2349.32, 2489.02, 2637.02, 2793.83, 2959.96, 3135.96, 3322.44, 3520.00, 3729.31, 3951.07, 4186.01, 4434.92, 4698.64, 4978.03, 5274.04, 5587.65, 5919.91, 6271.93, 6644.88, 7040.00, 7458.62, 7902.13, 8000])
    # searching for matched frequencies
    for i in range(0, frequencies.size - 1):
        if freq < frequencies[0]:
            note = name[0]
            break
        if freq > frequencies[-1]:
            note = name[-1]
            break
        if freq >= frequencies[i] and frequencies[i + 1] >= freq:
            if freq - frequencies[i] < (frequencies[i + 1] - frequencies[i]) / 2:
                note = name[i]
            else:
                note = name[i + 1]
            break
    return note

if __name__ == "__main__":
    path = os.getcwd()
    file_name = path + "\\" + "recording0.wav"
    audio_file = wave.open(file_name)
    Detected_Note = note_detect(audio_file)
    print("\n\tDetected Note = " + str(Detected_Note))
The full error on line 23:
Traceback (most recent call last):
  File "C:\Users\m8\Desktop\programing_stuff\python-stuff\minecraft_flute_player - 12-08-2022\app.py", line 86, in <module>
    Detected_Note = note_detect(audio_file)
  File "C:\Users\m8\Desktop\programing_stuff\python-stuff\minecraft_flute_player - 12-08-2022\app.py", line 23, in note_detect
    data=struct.unpack("<h",wdata)
struct.error: unpack requires a buffer of 2 bytes
Thanks for the help.
What I assume is happening here is that the size of the frame isn't 2 bytes, as you expected.
When specifying <h you are saying that you are going to extract 2 bytes (a little-endian signed short) from each frame. See the struct documentation for more on that.
You can use the getparams function to better understand the structure of the wav file.
>>> audio_file.getparams()
_wave_params(nchannels=1, sampwidth=2, framerate=44100, nframes=22050, comptype='NONE', compname='not compressed')
The parameters which are interesting are nchannels and sampwidth.
You can calculate sampwidth * nchannels to find the number of bytes you need to extract from each frame of this WAV file.
In this example, you have sampwidth * nchannels = 1 * 2 = 2 bytes per frame.
More information can be found in this answer which shows different cases of frame sizes.
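A minimal sketch of deriving the format from the file itself rather than hard-coding "<h" (the file name is hypothetical, and the width-to-format mapping is an assumption; note that 8-bit WAV samples are unsigned, and 24-bit samples would need manual handling):
import struct
import wave

audio_file = wave.open("recording0.wav", "rb")
n_channels = audio_file.getnchannels()
samp_width = audio_file.getsampwidth()

# One little-endian format code per channel, chosen by sample width.
fmt = "<" + {1: "B", 2: "h", 4: "i"}[samp_width] * n_channels

wdata = audio_file.readframes(1)
samples = struct.unpack(fmt, wdata)  # one value per channel
print(samples)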

Can a numpy file be created without defining an array?

I have some very large data to deal with. I'd like to be able to use np.load(filename, mmap_mode="r+") to use these files on disk rather than RAM. My issue is that creating them in RAM causes the exact problem I'm trying to avoid.
I already know about np.memmap, and that is a potential solution, but creating a memmap and then saving the array with np.save(filename, memmap) means I'd double the disk space requirement, even if only briefly, and that isn't always an option. Mainly I don't want bare memmaps because the header information in .npy files (namely shape and dtype) is useful to have.
My question is, can I create a numpy file without needing to first create it in memory? That is, can I create a numpy file by just giving a dtype and a shape? The idea would be along the lines of np.save(filename, np.empty((x, y, z))) but I'm assuming that empty requires it to be assigned in memory before saving.
My current solution is:
import tempfile
import numpy as np

def create_empty_numpy_file(filename, shape, dtype=np.float64):
    with tempfile.TemporaryFile() as tmp:
        memmap = np.memmap(tmp, dtype, mode="w+", shape=shape)
        np.save(filename, memmap)
EDIT
My final solution based on bnaeker's answer and a few details from numpy.lib.format:
class MockFlags:
    def __init__(self, shape, c_contiguous=True):
        self.c_contiguous = c_contiguous
        self.f_contiguous = (not c_contiguous) or (c_contiguous and len(shape) == 1)

class MockArray:
    def __init__(self, shape, dtype=np.float64, c_contiguous=True):
        self.shape = shape
        self.dtype = np.dtype(dtype)
        self.flags = MockFlags(shape, c_contiguous)

    def save(self, filename):
        if self.dtype.itemsize == 0:
            buffersize = 0
        else:
            # Set buffer size to 16 MiB to hide the Python loop overhead.
            buffersize = max(16 * 1024 ** 2 // self.dtype.itemsize, 1)
        n_chunks, remainder = np.divmod(
            np.prod(self.shape) * self.dtype.itemsize, buffersize
        )
        with open(filename, "wb") as f:
            np.lib.format.write_array_header_2_0(
                f, np.lib.format.header_data_from_array_1_0(self)
            )
            for chunk in range(n_chunks):
                f.write(b"\x00" * buffersize)
            f.write(b"\x00" * remainder)
The Numpy file format is really simple. There are a few under-documented functions you can use to create the required header bytes from the metadata needed to build an array, without actually building one.
import numpy as np

def create_npy_header_bytes(
    shape, dtype=np.float64, fortran_order=False, format_version="2.0"
):
    # 4- or 2-byte unsigned integer for the header length, depending on version
    n_size_bytes = 4 if format_version[0] == "2" else 2
    magic = b"\x93NUMPY"
    version_info = (
        int(each).to_bytes(1, "little") for each in format_version.split(".")
    )
    # Keys are supposed to be alphabetically sorted
    header = {
        "descr": np.lib.format.dtype_to_descr(np.dtype(dtype)),
        "fortran_order": fortran_order,
        "shape": shape,
    }
    # Pad header up to a multiple of 64 bytes
    header_bytes = str(header).encode("ascii")
    header_len = len(header_bytes)
    current_length = header_len + len(magic) + 2 + n_size_bytes  # + 2 for version information
    required_length = int(np.ceil(current_length / 64.0) * 64)
    padding = required_length - current_length - 1  # for the trailing newline
    header_bytes += b" " * padding + b"\n"
    # Length of the header dict, including padding and newline
    length = len(header_bytes).to_bytes(n_size_bytes, "little")
    return b"".join((magic, *version_info, length, header_bytes))
You can test that it's equivalent with this snippet:
import numpy as np
import io
x = np.zeros((10, 3, 4))
first = create_npy_header_bytes(x.shape)
stream = io.BytesIO()
np.lib.format.write_array_header_2_0(
    stream, np.lib.format.header_data_from_array_1_0(x)
)
print(f"Library: {stream.getvalue()}")
print(f"Custom: {first}")
You should see something like:
Library: b"\x93NUMPY\x02\x00t\x00\x00\x00{'descr': '<f8', 'fortran_order': False, 'shape': (10, 3, 4), } \n"
Custom: b"\x93NUMPY\x02\x00t\x00\x00\x00{'descr': '<f8', 'fortran_order': False, 'shape': (10, 3, 4)} \n"
which match, except for the trailing comma inside the header dict representation. That does not matter: the header only needs to be a valid Python literal representation of a dict, and a trailing comma is legal there.
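A quick check of that claim (both strings parse to the same dict):
import ast

a = ast.literal_eval("{'descr': '<f8', 'fortran_order': False, 'shape': (10, 3, 4), }")
b = ast.literal_eval("{'descr': '<f8', 'fortran_order': False, 'shape': (10, 3, 4)}")
print(a == b)  # True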
As an alternative approach, you could mock out an object which has the required fields for the library functions used to make the header itself. For np.lib.format.header_data_from_array_1_0, these seem to be .shape, .dtype, and .flags (which must have the fields c_contiguous and/or f_contiguous). That's actually much simpler, and would look like:
import numpy as np
import io

class MockFlags:
    def __init__(self, shape, c_contiguous=True):
        self.c_contiguous = c_contiguous
        self.f_contiguous = (not c_contiguous) or (c_contiguous and len(shape) == 1)

class MockArray:
    def __init__(self, shape, dtype=np.float64, c_contiguous=True):
        self.shape = shape
        self.dtype = np.dtype(dtype)
        self.flags = MockFlags(shape, c_contiguous)

mock = MockArray((10, 3, 4))
stream = io.BytesIO()
np.lib.format.write_array_header_2_0(
    stream, np.lib.format.header_data_from_array_1_0(mock)
)
print(stream.getvalue())
You should see:
b"\x93NUMPY\x02\x00t\x00\x00\x00{'descr': '<f8', 'fortran_order': False, 'shape': (10, 3, 4), } \n"
which happily matches what we have above, but without the fiddly work of counting bytes and mucking with padding. Much nicer :)

Python Read part of large binary file

I have a large binary file (~2.5 GB). It contains a header (336 bytes) and seismic signal data (x, y, and z channels) of type int32. The number of samples is 223,200,000.
I need to read part of the signal. For example, I want the part of the signal in the sample interval [216,000,000, 219,599,999].
I wrote the function:
import numpy as np

def reading(path, start_moment, end_moment):
    file_data = open(path, 'rb')
    if start_moment is not None:
        bytes_value = start_moment * 4 * 3
        file_data.seek(336 + bytes_value)
    else:
        file_data.seek(336)
    if end_moment is None:
        try:
            signals = np.fromfile(file_data, dtype=np.int32)
        except MemoryError:
            return None
        finally:
            file_data.close()
    else:
        moment_count = end_moment - start_moment + 1
        try:
            signals = np.fromfile(file_data, dtype=np.int32,
                                  count=moment_count * 3)
        except MemoryError:
            return None
        finally:
            file_data.close()
    channel_count = 3
    signal_count = signals.shape[0] // channel_count
    signals = np.reshape(signals, newshape=(signal_count, channel_count))
    return signals
If I run script with the function in PyCharm IDE I get error:
Traceback (most recent call last):
  File "D:/AppsBuilding/test/testReadBaikal8.py", line 41, in <module>
    signal_2 = reading(path=path, start_moment=216000000, end_moment=219599999)
  File "D:/AppsBuilding/test/testReadBaikal8.py", line 27, in reading
    count=moment_count * 3)
OSError: obtaining file position failed
But if I run the script with start_moment=7200000 and end_moment=10799999, everything works.
My PC runs 32-bit Windows 7 and has 1.95 GB of memory.
Please help me resolve this problem.
Divide the file into small blocks, freeing the memory after each block's content is processed:
def read_in_block(file_path):
    BLOCK_SIZE = 1024
    with open(file_path, "rb") as f:  # binary mode for a binary file
        while True:
            block = f.read(BLOCK_SIZE)
            if block:
                yield block
            else:
                return
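Example usage (the path is hypothetical): stream the file without ever holding more than one block in memory.
total = 0
for block in read_in_block("path/to/data.bin"):
    total += len(block)
print(total)  # should equal the file size in bytes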
I don't use Numpy but I don't see anything obviously wrong with your code. However, you say the file is approximately 2.5 GB in size. A triplet index of 219,599,999 requires a file at least 2.45 GB in size:
$ calc
; 219599999 * 4 * 3
2635199988
; 2635199988 / 1024^3
~2.45422123745083808899
Are you sure your file is really that large?
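A quick Python version of that check (the path is hypothetical; this version also counts the 336-byte header and the +1 for the inclusive end index):
import os

needed = 336 + (219599999 + 1) * 3 * 4  # header + triplets * channels * 4 bytes
actual = os.path.getsize("path/to/data.bin")
print(needed, actual, actual >= needed)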
I also don't use MS Windows but the following toy programs work for me. The first creates a data file that mimics the structure of yours. The second shows that it can read the final data triplet. What happens if you run these on your system?
fh = open('x', 'wb')
fh.write(b'0123456789')
for i in range(0, 1000):
    s = bytes('{:03d}'.format(i), 'ascii')
    fh.write(b'a' + s + b'b' + s + b'c' + s)
fh.close()
Read the data from file x:
fh = open('x', 'rb')
triplet = 999
fh.seek(10 + triplet * 3 * 4)
data = fh.read(3 * 4)
print(data)
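If the file size checks out, a sketch of an alternative using np.memmap (the path is hypothetical, and this assumes a 64-bit Python, since a 2.5 GB mapping will not fit in a 32-bit address space): map the file read-only and slice the wanted interval without reading the rest.
import numpy as np

data = np.memmap("path/to/data.bin", dtype=np.int32, mode="r", offset=336)
signals = data.reshape(-1, 3)[216000000:219600000]  # rows are (x, y, z) triplets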

Python struct unpack with irregular field sizes

I'm trying to unpack a struct containing some metadata and an image. It's all bits, but the way the contents were packed is quite strange. Instead of elements being aligned on a byte, they're aligned on every 6 bits. The file format and contents are described at the link: http://etlcdb.db.aist.go.jp/etlcdb/etln/form_c.htm.
The only fields that I am interested in are the JIS code and the 16 gray level image data that is the last field. This data is taken from http://etlcdb.db.aist.go.jp/etlcdb/etln/etl4/etl4.htm.
The main problem that I'm facing is the fact that the fields are 6 bit aligned. I don't know how to properly unpack the struct.
As an aside, with PIL it's also unclear how to handle 4-bit images. Mode '1' isn't working correctly, as expected.
import struct
from PIL import Image, ImageFont, ImageDraw

filename = 'ETL4/ETL4C'
skip = 0
record_size = 2952
with open(filename, 'rb') as f:  # binary mode, not 'r'
    f.seek(skip * record_size)
    s = f.read(record_size)
r = struct.unpack('>9x1i203x2736s', s)
print(r[1])
i1 = Image.frombytes('1', (72, 76), r[1], 'raw')
# fn = 'ETL9B_{:d}_{:d}.png'.format((r[0]-1)%20+1, hex(r[1])[-4:])
fn = 'name1.png'
i1.save(fn, 'PNG')
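Not a full answer, but a minimal sketch of pulling consecutive 6-bit fields out of a record without struct (the most-significant-bit-first bit order is an assumption; the field offsets come from the format table at the link above):
def six_bit_fields(record, count, offset_bits=0):
    # Treat the whole record as one big integer, then shift and mask
    # out each consecutive 6-bit field from the given bit offset.
    big = int.from_bytes(record, "big")
    total_bits = len(record) * 8
    fields = []
    for i in range(count):
        shift = total_bits - offset_bits - 6 * (i + 1)
        fields.append((big >> shift) & 0x3F)
    return fields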

Matlab to Python Conversion binary file read

I am new to both Matlab and Python and I have to convert a program in Matlab to Python. I am not sure how to typecast the data after reading from the file in Python. The file used is a binary file.
Below is the Matlab code:
fid = fopen (filename, 'r');
fseek (fid, 0, -1);
meta = zeros (n, 9, 'single');
v = zeros (n, 128, 'single');
d = 0;
for i = 1:n
    meta(i,:) = fread (fid, 9, 'float');
    d = fread (fid, 1, 'int');
    v(i,:) = fread (fid, d, 'uint8=>single');
end
I have written the below program in Python:
fid = open(filename, 'r')
fid.seek(0, 0)
meta = np.zeros((n, 9), dtype=np.float32)
v = np.zeros((n, 128), dtype=np.float32)
for i in range(n):
    data_str = fid.read(9)
    meta[1,:] = unpack('f', data_str)
For this unpack, I am getting the error:
"unpack requires a string argument of length 4"
Please suggest some way to make it work.
I looked into the problem a little, mainly because I will need this in the near future too. It turns out there is a very simple solution using numpy, assuming you have a Matlab matrix stored the way I do.
import numpy as np

def read_matrix(file_name):
    return np.fromfile(file_name, dtype='<f')  # little-endian single-precision float

arr = read_matrix(file_path)
print(arr[0:10])  # sample data
print(len(arr))   # number of elements
The data type (dtype) you must find out yourself. Help on this is here. I used fwrite(fid,value,'single'); to store the data in Matlab; if you have the same, the code above will work.
Note that the returned variable is a flat numpy array; you'll have to reshape it to match the original shape of your data. In my case len(arr) is 307200, from a matrix of size 15360 x 20.
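For a record-by-record translation of the Matlab loop, a sketch along these lines may be closer to the original (note the file must be opened in binary mode; the fid.read(9) in the question reads 9 bytes where 'f' needs exactly 4, which causes the error; n and filename as in the question):
import numpy as np

def read_records(filename, n):
    meta = np.zeros((n, 9), dtype=np.float32)
    v = np.zeros((n, 128), dtype=np.float32)
    with open(filename, "rb") as fid:
        for i in range(n):
            meta[i, :] = np.fromfile(fid, dtype=np.float32, count=9)
            d = int(np.fromfile(fid, dtype=np.int32, count=1)[0])
            v[i, :d] = np.fromfile(fid, dtype=np.uint8, count=d)  # auto-cast to float32
    return meta, v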
