unpack_from requires a buffer of at least 4 bytes - python

I am receiving a packet from a client, consisting of many fields. I read all the fields successfully, but when it comes to the last field, tag_end, Python gives me an error:
unpack_from requires a buffer of at least 4 bytes not found.
This is the code:
def set_bin(self, buf):
    """Reads a vector of bytes (probably received from network or
    read from file) and tries to construct the packet structure
    from it, by reading each packet member from the buffer. This
    is somehow like deserializing the packet.
    """
    assert isinstance(buf, bytearray), 'buffer type is not valid'
    offset = 0
    print("$$$$$$$$$$$$$$$$ set bin $$$$$$$$$$$$$$$$$")
    try:
        (self._tag_start, self._version, self._checksum, self._connection_id,
         self._packet_seq) = Packet.PACKER_1.unpack_from(str(buf), offset)
    except struct.error as e:
        print(e)
        raise DeserializeError(e)
    except ValueError as e:
        print(e)
        raise DeserializeError(e)
    #I=4 H=2 B=1
    offset = Packet.OFFSET_GUID #14 correct
    self._guid = buf[offset:offset+Packet.UUID_SIZE] #14-16 correct
    offset = Packet.OFFSET_GUID + Packet.UUID_SIZE
    print("$$$$$$$$$$$$$$$$ GUID read successfully $$$$$$$$$$$$$$$$$")
    try:
        (self._timestamp_sec, self._timestamp_microsec, self._command,
         self._command_seq, self._subcommand, self._data_seq,
         self._data_length) = Packet.PACKER_3.unpack_from(str(buf), offset)
    except struct.error as e:
        print(e)
        raise DeserializeError(e)
    except ValueError as e:
        print(e)
        raise DeserializeError(e)
    print("$$$$$$$$$$$$$$$$ timestamps read successfully $$$$$$$$$$$$$$$$$")
    offset = Packet.OFFSET_AUTHENTICATE
    self._username = buf[offset:offset + self.USERNAME_SIZE] #Saman
    offset += self.USERNAME_SIZE
    print("$$$$$$$$$$$$$$$$ username read successfully $$$$$$$$$$$$$$$$$")
    self._password = buf[offset:offset+self.USERNAME_SIZE]
    offset += self.PASSWORD_SIZE
    print("$$$$$$$$$$$$$$$$ password read successfully $$$$$$$$$$$$$$$$$")
    self._data = buf[offset:offset+self._data_length]
    offset = offset + self._data_length
    print("$$$$$$$$$$$$$$$$ data read successfully $$$$$$$$$$$$$$$$$")
    try:
        (self._tag_end,) = Packet.PACKER_4.unpack_from(str(buf), offset)
    except struct.error as e:
        print(e)
        raise DeserializeError(e)
    except ValueError as e:
        print(e)
        raise DeserializeError(e)
    print("$$$$$$$$$$$$$$$$ tag end read successfully $$$$$$$$$$$$$$$$$")
    if len(buf) != Packet.PACKER.size + self._data_length:
        print('failed to deserialize binary data correctly and construct the packet due to extra data')
    else:
        print('############### Deserialized Successfully')
and these are some of the constants used in the code:
STRUCT_FORMAT_STR = r'=IHIHH 16B IIHHHHH I 6c 9c' #Saman
STRUCT_FORMAT_STR_1 = r'=IHIHH'
STRUCT_FORMAT_STR_2 = r'=16B'
STRUCT_FORMAT_STR_3 = r'=IIHHHHH'
STRUCT_FORMAT_STR_4 = r'=I'
STRUCT_FORMAT_STR_5 = r'=6c'
STRUCT_FORMAT_STR_6 = r'=9c'
UUID_SIZE = 16
OFFSET_GUID = 14
#OFFSET_DATA = 48 #shifting offset data by 15 char
OFFSET_AUTHENTICATE = 48
PACKER = struct.Struct(str(STRUCT_FORMAT_STR)) #Saman
PACKER_1 = struct.Struct(str(STRUCT_FORMAT_STR_1))
PACKER_2 = struct.Struct(str(STRUCT_FORMAT_STR_2))
PACKER_3 = struct.Struct(str(STRUCT_FORMAT_STR_3))
PACKER_4 = struct.Struct(str(STRUCT_FORMAT_STR_4))
PACKER_5 = struct.Struct(str(STRUCT_FORMAT_STR_5))
PACKER_6 = struct.Struct(str(STRUCT_FORMAT_STR_6))
BYTES_TAG_START = PACKER_4.pack(TAG_START)
BYTES_TAG_END = PACKER_4.pack(TAG_END)
and the initialization of the packet object, where the fields are initialized:
def __init__(self, **kwargs):
    if 'buf' in kwargs:
        self.set_bin(kwargs['buf'])
    else:
        assert kwargs['command'] in Packet.RTCINET_COMMANDS.values() and kwargs['subcommand'] in Packet.RTCINET_COMMANDS.values(), 'Undefined protocol command'
        assert isinstance(kwargs['data'], bytearray), 'invalid type for data field'
        for field in ('command', 'subcommand', 'data'):
            setattr(self, '_' + field, kwargs[field])
        self._tag_start = Packet.TAG_START
        self._version = Packet.VERSION_CURRENT % (Packet.USHRT_MAX + 1)
        self._checksum = Packet.CRC_INIT
        self._connection_id = kwargs.get('connection_id', 0) % (Packet.USHRT_MAX + 1)
        self._packet_seq = Packet.PACKET_SEQ
        Packet.PACKET_SEQ = (Packet.PACKET_SEQ + 1) % (Packet.USHRT_MAX + 1)
        self._guid = uuid.uuid4().bytes
        dt = datetime.datetime.now()
        self._timestamp_sec = int(time.mktime(dt.timetuple()))
        self._timestamp_microsec = dt.microsecond
        # self._command = kwargs['command']
        self._command_seq = kwargs.get('command_seq', 0)
        # self._subcommand = kwargs['subcommand']
        self._data_seq = kwargs.get('data_seq', 0)
        self._data_length = len(kwargs['data'])
        self._username = Packet.USERNAME #Saman
        self._password = Packet.PASSWORD
I have made sure that I read all the fields in the same order they were written into the packet by the client program, but I still couldn't solve this problem.
Do you have any idea how this could be solved?

The problem seems to be that you're converting things to str all over the place for no good reason.
In some places, like PACKER_1 = struct.Struct(str(STRUCT_FORMAT_STR_1)), it makes your code less readable and understandable, but doesn't affect the actual output. For example, STRUCT_FORMAT_STR_1 is already a str, so str(STRUCT_FORMAT_STR_1) is the same str.
But in other places, it's far worse than that. In particular, look at all the lines like Packet.PACKER_1.unpack_from(str(buf), offset). There, buf is a bytearray. (It has to be, because you assert it.) Calling str on a bytearray gives you the string representation of that bytearray. For example:
>>> b = bytearray(b'abc')
>>> len(b)
3
>>> s = str(b)
>>> s
"bytearray(b'abc')"
>>> len(s)
17
That string representation is obviously not generally going to have the same length as the actual buffer you're representing. So it's no wonder that you get errors about the length being wrong. (And if you got really unlucky and didn't have any such errors, you'd be reading garbage values instead.)
So, what should you do to convert the bytearray into something the struct module can handle? Nothing! As the docs say:
Several struct functions (and methods of Struct) take a buffer argument. This refers to objects that implement the Buffer Protocol and provide either a readable or read-writable buffer. The most common types used for that purpose are bytes and bytearray…
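For example, here is a minimal sketch of the corrected call (the format string is the one from the question; the buffer is just a zero-filled stand-in):
import struct

PACKER_1 = struct.Struct('=IHIHH')       # same format string as in the question

buf = bytearray(64)                      # stand-in for the received packet
fields = PACKER_1.unpack_from(buf, 0)    # a bytearray supports the buffer protocol,
                                         # so it can be passed in directly, untouched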

Related

Base64 Decoding in Python 2.7

I am working with the following script that reads a csv file and decodes it. When I run it, I receive an Incorrect Padding error. When I take an individual entry and perform a str.decode('base64'), I get an appropriate result (no error). What could be the problem? Could some entries be corrupt and it's throwing the whole process off?
def get_sigpair_from_csv(csv_in, start=0, skip_to_tx=None, want_tx=[]):
    want_tx = set(want_tx)
    skip_entries = True
    with open(csv_in, 'r') as f:
        for nr, line in enumerate(f):
            if nr < start:
                if nr % 100000 == 0:
                    print "skip", nr, f.tell()
                continue
            if nr % 10000000 == 0:
                print "10m", nr
            try:
                # read data
                cols = line.split(";", 1)
                tx = cols[0].strip()
                if skip_to_tx and tx == skip_to_tx:
                    skip_entries = False
                    # skip this entry - already in db
                    continue
                if skip_to_tx and skip_entries:
                    print "skiptx", nr, tx
                    continue
                if want_tx and tx not in want_tx:
                    continue
                scriptsig = cols[1].decode("base64")
                sig = scriptsig_to_ecdsa_sig(scriptsig)
                sig['tx'] = tx
                sig['nr'] = nr
                yield sig
            except ValueError, ve:
                #print tx,repr(ve)
                pass
            except Exception, e:
                print tx, repr(e)
Here is an example of the output:
879b068c75492e9d860763e843212d7aed2fb81ad3ee24592b48cdf5df624dcd Error('Incorrect padding',)
However, when I do:
x = '879b068c75492e9d860763e843212d7aed2fb81ad3ee24592b48cdf5df624dcd'
x.decode('base64')
It works
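One quick way to narrow this down (a sketch only; the file name is a placeholder) is to log the rows whose second column is not a multiple of 4 characters long after stripping whitespace, since base64 text is normally padded to a multiple of 4 and those rows are the likely source of the Incorrect padding error:
import base64

with open('sigs.csv', 'r') as f:                  # placeholder file name
    for nr, line in enumerate(f):
        payload = line.split(";", 1)[1].strip()   # same split as in the generator
        if len(payload) % 4:
            print nr, len(payload), repr(payload[:40])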

unpack requires a string argument of length 24

I am not sure what I am doing wrong here but I am trying to open a file, trace1.flow, read the header information then throw the source IP and destination IP into dictionaries. This is done in Python running on a Fedora VM. I am getting the following error:
(secs, nsecs, booted, exporter, mySourceIP, myDestinationIP) = struct.unpack('IIIIII',myBuf)
struct.error: unpack requires a string argument of length 24
Here is my code:
import struct
import socket

#Dictionaries
uniqSource = {}
uniqDestination = {}

def int2quad(i):
    z = struct.pack('!I', i)
    return socket.inet_ntoa(z)

myFile = open('trace1.flow')
myBuf = myFile.read(8)
(magic, endian, version, headerLen) = struct.unpack('HBBI', myBuf)
print "Magic: ", hex(magic), "Endian: ", endian, "Version: ", version, "Header Length: ", headerLen
myFile.read(headerLen - 8)

try:
    while(True):
        myBuf = myFile.read(24)
        (secs, nsecs, booted, exporter, mySourceIP, myDestinationIP) = struct.unpack('IIIIII', myBuf)
        mySourceIP = int2quad(mySourceIP)
        myDestinationIP = int2quad(myDestinationIP)
        if mySourceIP not in uniqSource:
            uniqSource[mySourceIP] = 1
        else:
            uniqSource[mySourceIP] += 1
        if myDestinationIP not in uniqDestination:
            uniqDestination[myDestinationIP] = 1
        else:
            uniqDestination[myDestinationIP] += 1
        myFile.read(40)
except EOFError:
    print "END OF FILE"
You seem to assume that file.read will raise EOFError on end of file, but this error is only raised by input() and raw_input(). file.read will simply return a string that's shorter than requested (possibly empty).
So you need to check the length after reading:
myBuf = myFile.read(24)
if len(myBuf) < 24:
    break
Perhaps you have reached end-of-file. Check the length of myBuf:
len(myBuf)
It's probably less than 24 chars long. Also, you don't need those extra parentheses, and you can specify duplicated types using 'nI', like this:
secs, nsecs, booted, exporter, mySourceIP, myDestinationIP = struct.unpack('6I',myBuf)
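Putting both suggestions together, the read loop might look roughly like this (opening the file in binary mode is also safer for struct parsing):
import struct

myFile = open('trace1.flow', 'rb')
myBuf = myFile.read(8)
magic, endian, version, headerLen = struct.unpack('HBBI', myBuf)
myFile.read(headerLen - 8)

while True:
    myBuf = myFile.read(24)
    if len(myBuf) < 24:        # read() returns a short (or empty) string at EOF
        break
    secs, nsecs, booted, exporter, mySourceIP, myDestinationIP = struct.unpack('6I', myBuf)
    # ... update uniqSource / uniqDestination as before ...
    myFile.read(40)            # skip the rest of the record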

Speed up reading wav in python

Evening,
I am working on a project that requires me to read in multichannel wav files in 32-bit float.
When I read a specific file (1 minute long, 6 channels, 48k fs) into Matlab and measure it with tic/toc, it parses the file in 2.456482 seconds.
Matlab Code for file reading speed measurement
tic
wavread('C:/data/testData/6ch.wav');
toc
When I do it in python (mind you, I'm pretty unfamiliar with python) it takes 18.1655315617 seconds!
It seems to me like the way I am doing it is inefficient (I did get it down to 18 from 28 but it's still too much...)
I stripped the code to what is relevant to this subject:
Python Code for file reading speed measurement
import wave32
import struct
import time
import numpy as np
def getWavData(inFile):
    wavFile = wave32.open(inFile, 'r')
    wavParams = wavFile.getparams()
    nChannels = wavParams[0]
    byteDepth = wavParams[1]
    nFrames = wavParams[3]
    wavData = np.empty([nFrames, nChannels], np.float32)
    frames = wavFile.readframes(nFrames)
    for i in range(nFrames):
        for j in range(nChannels):
            start = (i * nChannels + j) * byteDepth
            stop = start + byteDepth
            wavData[i][j] = struct.unpack('<f', frames[start:stop])[0]
    return wavData

inFile = 'C:/data/testData/6ch.wav'
start = time.clock()
data2 = getWavData(inFile)
elapsed = time.clock()
elapsedNew = elapsed - start
print str(elapsedNew)
Please note that wave32 is a small hack I had to perform on wave.py to enable 32-bit float reading.
"""Stuff to parse WAVE files.
Usage.
Reading WAVE files:
f = wave.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
When the setpos() and rewind() methods are not used, the seek()
method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for linear samples)
getcompname() -- returns human-readable version of
compression type ('not compressed' linear samples)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- returns None (for compatibility with the
aifc module)
getmark(id) -- raises an error since the mark does not
exist (for compatibility with the aifc module)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell() and the position given to setpos()
are compatible and have nothing to do with the actual position in the
file.
The close() method is called automatically when the class instance
is destroyed.
Writing WAVE files:
f = wave.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
tell() -- return current position in output file
writeframesraw(data)
-- write audio frames without pathing up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, perhaps possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
The close() method is called automatically when the class instance
is destroyed.
"""
import __builtin__
__all__ = ["open", "openfp", "Error"]
class Error(Exception):
pass
WAVE_FORMAT_PCM = 0x0001
WAVE_FORMAT_IEEE_FLOAT = 0x0003
_array_fmts = None, 'b', 'h', None, 'l'
# Determine endian-ness
import struct
if struct.pack("h", 1) == "\000\001":
big_endian = 1
else:
big_endian = 0
from chunk import Chunk
class Wave_read:
"""Variables used in this class:
These variables are available to the user though appropriate
methods of this class:
_file -- the open file with methods read(), close(), and seek()
set through the __init__() method
_nchannels -- the number of audio channels
available through the getnchannels() method
_nframes -- the number of audio frames
available through the getnframes() method
_sampwidth -- the number of bytes per audio sample
available through the getsampwidth() method
_framerate -- the sampling frequency
available through the getframerate() method
_comptype -- the AIFF-C compression type ('NONE' if AIFF)
available through the getcomptype() method
_compname -- the human-readable AIFF-C compression type
available through the getcomptype() method
_soundpos -- the position in the audio stream
available through the tell() method, set through the
setpos() method
These variables are used internally only:
_fmt_chunk_read -- 1 iff the FMT chunk has been read
_data_seek_needed -- 1 iff positioned correctly in audio
file for readframes()
_data_chunk -- instantiation of a chunk class for the DATA chunk
_framesize -- size of one frame in the file
"""
def initfp(self, file):
self._convert = None
self._soundpos = 0
self._file = Chunk(file, bigendian = 0)
if self._file.getname() != 'RIFF':
raise Error, 'file does not start with RIFF id'
if self._file.read(4) != 'WAVE':
raise Error, 'not a WAVE file'
self._fmt_chunk_read = 0
self._data_chunk = None
while 1:
self._data_seek_needed = 1
try:
chunk = Chunk(self._file, bigendian = 0)
except EOFError:
break
chunkname = chunk.getname()
if chunkname == 'fmt ':
self._read_fmt_chunk(chunk)
self._fmt_chunk_read = 1
elif chunkname == 'data':
if not self._fmt_chunk_read:
raise Error, 'data chunk before fmt chunk'
self._data_chunk = chunk
self._nframes = chunk.chunksize // self._framesize
self._data_seek_needed = 0
break
chunk.skip()
if not self._fmt_chunk_read or not self._data_chunk:
raise Error, 'fmt chunk and/or data chunk missing'
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, basestring):
f = __builtin__.open(f, 'rb')
self._i_opened_the_file = f
# else, assume it is an open file object already
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def __del__(self):
self.close()
#
# User visible methods.
#
def getfp(self):
return self._file
def rewind(self):
self._data_seek_needed = 1
self._soundpos = 0
def close(self):
if self._i_opened_the_file:
self._i_opened_the_file.close()
self._i_opened_the_file = None
self._file = None
def tell(self):
return self._soundpos
def getnchannels(self):
return self._nchannels
def getnframes(self):
return self._nframes
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
return None
def getmark(self, id):
raise Error, 'no marks'
def setpos(self, pos):
if pos < 0 or pos > self._nframes:
raise Error, 'position not in range'
self._soundpos = pos
self._data_seek_needed = 1
def readframes(self, nframes):
if self._data_seek_needed:
self._data_chunk.seek(0, 0)
pos = self._soundpos * self._framesize
if pos:
self._data_chunk.seek(pos, 0)
self._data_seek_needed = 0
if nframes == 0:
return ''
if self._sampwidth > 1 and big_endian:
# unfortunately the fromfile() method does not take
# something that only looks like a file object, so
# we have to reach into the innards of the chunk object
import array
chunk = self._data_chunk
data = array.array(_array_fmts[self._sampwidth])
nitems = nframes * self._nchannels
if nitems * self._sampwidth > chunk.chunksize - chunk.size_read:
nitems = (chunk.chunksize - chunk.size_read) / self._sampwidth
data.fromfile(chunk.file.file, nitems)
# "tell" data chunk how much was read
chunk.size_read = chunk.size_read + nitems * self._sampwidth
# do the same for the outermost chunk
chunk = chunk.file
chunk.size_read = chunk.size_read + nitems * self._sampwidth
data.byteswap()
data = data.tostring()
else:
data = self._data_chunk.read(nframes * self._framesize)
if self._convert and data:
data = self._convert(data)
self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
return data
#
# Internal methods.
#
def _read_fmt_chunk(self, chunk):
wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack('<hhllh', chunk.read(14))
if wFormatTag == WAVE_FORMAT_PCM or wFormatTag==WAVE_FORMAT_IEEE_FLOAT:
sampwidth = struct.unpack('<h', chunk.read(2))[0]
self._sampwidth = (sampwidth + 7) // 8
else:
#sampwidth = struct.unpack('<h', chunk.read(2))[0]
#self._sampwidth = (sampwidth + 7) // 8
raise Error, 'unknown format: %r' % (wFormatTag,)
self._framesize = self._nchannels * self._sampwidth
self._comptype = 'NONE'
self._compname = 'not compressed'
class Wave_write:
"""Variables used in this class:
These variables are user settable through appropriate methods
of this class:
_file -- the open file with methods write(), close(), tell(), seek()
set through the __init__() method
_comptype -- the AIFF-C compression type ('NONE' in AIFF)
set through the setcomptype() or setparams() method
_compname -- the human-readable AIFF-C compression type
set through the setcomptype() or setparams() method
_nchannels -- the number of audio channels
set through the setnchannels() or setparams() method
_sampwidth -- the number of bytes per audio sample
set through the setsampwidth() or setparams() method
_framerate -- the sampling frequency
set through the setframerate() or setparams() method
_nframes -- the number of audio frames written to the header
set through the setnframes() or setparams() method
These variables are used internally only:
_datalength -- the size of the audio samples written to the header
_nframeswritten -- the number of frames actually written
_datawritten -- the size of the audio samples actually written
"""
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, basestring):
f = __builtin__.open(f, 'wb')
self._i_opened_the_file = f
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def initfp(self, file):
self._file = file
self._convert = None
self._nchannels = 0
self._sampwidth = 0
self._framerate = 0
self._nframes = 0
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._headerwritten = False
def __del__(self):
self.close()
#
# User visible methods.
#
def setnchannels(self, nchannels):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if nchannels < 1:
raise Error, 'bad # of channels'
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error, 'number of channels not set'
return self._nchannels
def setsampwidth(self, sampwidth):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if sampwidth < 1 or sampwidth > 4:
raise Error, 'bad sample width'
self._sampwidth = sampwidth
def getsampwidth(self):
if not self._sampwidth:
raise Error, 'sample width not set'
return self._sampwidth
def setframerate(self, framerate):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if framerate <= 0:
raise Error, 'bad frame rate'
self._framerate = framerate
def getframerate(self):
if not self._framerate:
raise Error, 'frame rate not set'
return self._framerate
def setnframes(self, nframes):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, comptype, compname):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if comptype not in ('NONE',):
raise Error, 'unsupported compression type'
self._comptype = comptype
self._compname = compname
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def setparams(self, params):
nchannels, sampwidth, framerate, nframes, comptype, compname = params
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
if not self._nchannels or not self._sampwidth or not self._framerate:
raise Error, 'not all parameters set'
return self._nchannels, self._sampwidth, self._framerate, \
self._nframes, self._comptype, self._compname
def setmark(self, id, pos, name):
raise Error, 'setmark() not supported'
def getmark(self, id):
raise Error, 'no marks'
def getmarkers(self):
return None
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written(len(data))
nframes = len(data) // (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
if self._sampwidth > 1 and big_endian:
import array
data = array.array(_array_fmts[self._sampwidth], data)
data.byteswap()
data.tofile(self._file)
self._datawritten = self._datawritten + len(data) * self._sampwidth
else:
self._file.write(data)
self._datawritten = self._datawritten + len(data)
self._nframeswritten = self._nframeswritten + nframes
def writeframes(self, data):
self.writeframesraw(data)
if self._datalength != self._datawritten:
self._patchheader()
def close(self):
if self._file:
self._ensure_header_written(0)
if self._datalength != self._datawritten:
self._patchheader()
self._file.flush()
self._file = None
if self._i_opened_the_file:
self._i_opened_the_file.close()
self._i_opened_the_file = None
#
# Internal methods.
#
def _ensure_header_written(self, datasize):
if not self._headerwritten:
if not self._nchannels:
raise Error, '# channels not specified'
if not self._sampwidth:
raise Error, 'sample width not specified'
if not self._framerate:
raise Error, 'sampling rate not specified'
self._write_header(datasize)
def _write_header(self, initlength):
assert not self._headerwritten
self._file.write('RIFF')
if not self._nframes:
self._nframes = initlength / (self._nchannels * self._sampwidth)
self._datalength = self._nframes * self._nchannels * self._sampwidth
self._form_length_pos = self._file.tell()
self._file.write(struct.pack('<l4s4slhhllhh4s',
36 + self._datalength, 'WAVE', 'fmt ', 16,
WAVE_FORMAT_PCM, self._nchannels, self._framerate,
self._nchannels * self._framerate * self._sampwidth,
self._nchannels * self._sampwidth,
self._sampwidth * 8, 'data'))
self._data_length_pos = self._file.tell()
self._file.write(struct.pack('<l', self._datalength))
self._headerwritten = True
def _patchheader(self):
assert self._headerwritten
if self._datawritten == self._datalength:
return
curpos = self._file.tell()
self._file.seek(self._form_length_pos, 0)
self._file.write(struct.pack('<l', 36 + self._datawritten))
self._file.seek(self._data_length_pos, 0)
self._file.write(struct.pack('<l', self._datawritten))
self._file.seek(curpos, 0)
self._datalength = self._datawritten
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Wave_read(f)
elif mode in ('w', 'wb'):
return Wave_write(f)
else:
raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
openfp = open # B/W compatibility
Sorry for the long code BTW :)
So my question is: is the wave.py module inherently slow (any alternatives to fix this?) or am I doing something inefficient?
I suppose I could just read in the wav header with a custom function and read the file in a different way, but it seems like this is going to be A LOT of work, especially since I don't know a lot about 1) python and 2) file handling
Kind regards,
K.
Edit: I tried unutbu's suggestion but that does not work as scipy does not accept >16 bit.
When I try to parse the wav file through the scipy wavreader I get this message:
C:\Users\King Broos\AppData\Local\Enthought\Canopy32\System\lib\site-packages\scipy\io\wavfile.py:31: WavFileWarning: Unfamiliar format bytes
warnings.warn("Unfamiliar format bytes", WavFileWarning)
C:\Users\King Broos\AppData\Local\Enthought\Canopy32\System\lib\site-packages\scipy\io\wavfile.py:121: WavFileWarning: chunk not understood
warnings.warn("chunk not understood", WavFileWarning)
Looking into the code of wavfile.py this is the line where it throws the exception:
if (comp != 1 or size > 16):
warnings.warn("Unfamiliar format bytes", WavFileWarning)
I really need either 24 or 32 bit so I guess scipy not an option?
If you can install or have scipy, then use wavfile.read:
import scipy.io.wavfile as wavfile
sample_rate, x = wavfile.read(filename)
You might also want to study the source code, here.
Note that scipy.io.wavfile does not use Python's wave module. I'm not sure if it reads your IEEE_FLOAT format or not, but it does not do the same check as wave.py:
if wFormatTag == WAVE_FORMAT_PCM or wFormatTag == WAVE_FORMAT_IEEE_FLOAT:
    sampwidth = struct.unpack('<h', chunk.read(2))[0]
    self._sampwidth = (sampwidth + 7) // 8
else:
    #sampwidth = struct.unpack('<h', chunk.read(2))[0]
    #self._sampwidth = (sampwidth + 7) // 8
    raise Error, 'unknown format: %r' % (wFormatTag,)
so perhaps it will work out-of-the-box.
By the way, instead of making your own module wave32.py, which is almost exactly the same as wave.py from the standard library, you could use monkey-patching:
import wave
import struct

WAVE_FORMAT_PCM = 0x0001
WAVE_FORMAT_IEEE_FLOAT = 0x0003

def _read_fmt_chunk(self, chunk):
    wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack('<hhllh', chunk.read(14))
    if wFormatTag == WAVE_FORMAT_PCM or wFormatTag == WAVE_FORMAT_IEEE_FLOAT:
        sampwidth = struct.unpack('<h', chunk.read(2))[0]
        self._sampwidth = (sampwidth + 7) // 8
    else:
        raise wave.Error, 'unknown format: %r' % (wFormatTag,)
    self._framesize = self._nchannels * self._sampwidth
    self._comptype = 'NONE'
    self._compname = 'not compressed'

wave.Wave_read._read_fmt_chunk = _read_fmt_chunk
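A rough sketch of how the patched module might then be used, replacing the per-sample struct.unpack loop with a single numpy.frombuffer call (the file path and the 32-bit float format are taken from the question):
import numpy as np

w = wave.open('C:/data/testData/6ch.wav', 'r')
nChannels, byteDepth, fs, nFrames = w.getparams()[:4]
frames = w.readframes(nFrames)
w.close()
# one vectorized conversion instead of nFrames * nChannels struct.unpack calls
wavData = np.frombuffer(frames, dtype='<f4').reshape(nFrames, nChannels)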
You can also use numpy directly:
import numpy as np
fs = np.fromfile(filename, dtype=np.int32, count=1, offset=24)[0] # Hz
byte_length = np.fromfile(filename, dtype=np.int32, count=1, offset=40)[0]
to read pieces of the metadata manually. I recommend using a hex editor and a wave format reference to verify the locations of the metadata fields and the offset to the start of the data chunk (it might not be 40 or 44 bytes in).
To read 32-bit WAVE_FORMAT_IEEE_FLOAT:
data = np.fromfile(filename, dtype=np.float32, count=byte_length // 4, offset=44)
To read 24-bit WAVE_FORMAT_PCM:
# prepend zero-byte to each sample (since there's no np.int24)
# then flatten, convert normally and byte-shift to correct for extra byte
data = np.zeros([byte_length // 3, 4], dtype=np.int8)
data[:, 1:] = np.fromfile(filename, dtype=np.int8, count=byte_length, offset=44).reshape(-1, 3)
data = np.right_shift(data.reshape(-1).view(dtype=np.int32), 8)
data = data / 2 ** 23 # if you want to normalize
This depends on the wave file and machine, but it seems to be ~120 times faster than a loop for a 4.4 MB 24-bit .wav file, and there are likely bigger performance gains for bigger files (until swap is required; I think ~5 memory copies are performed, including normalization).
This assumes:
No extra chunks at the start of the file, else offset= parameters are wrong
Single channel - reshape the array and/or change the byte order for multi-channel, with something like .reshape(num_channels, -1, order='F') (see the sketch after this list)
Little-endian I think
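For example, for the 6-channel 32-bit float file from the question, the same approach might look like this (the file name is a placeholder, and the offsets of 40 and 44 again assume no extra chunks before the data chunk):
import numpy as np

filename = '6ch.wav'                                   # placeholder path
byte_length = np.fromfile(filename, dtype=np.int32, count=1, offset=40)[0]
data = np.fromfile(filename, dtype='<f4', count=byte_length // 4, offset=44)
data = data.reshape(-1, 6)                             # interleaved frames -> one column per channel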

python construct for protocol parsing

I am trying to combine the power of Twisted's Protocol with the flexibility of construct, the declarative binary data parser.
So far, my MessageReceiver protocol accumulates the data coming from the TCP channel in the following way:
def rawDataReceived(self, data):
    '''
    This method bufferizes the data coming from the TCP channel in the following way:
    - Initially, discard the stream until a reserved character is detected
    - add data to the buffer up to the expected message length unless the reserved character is met again. In that case discard the message and start again
    - if the expected message length is reached, attempt to parse the message and clear the buffer
    '''
    if self._buffer:
        index = data.find(self.reserved_character)
        if index > -1:
            if len(self._buffer) + index >= self._fixed_size:
                self.on_message(self._buffer + data[:data.index(self._reserved_character)])
            self._buffer = b''
            data = data[data.index(self.reserved_character):]
            [self.on_message(chunks[:self._fixed_size]) for chunks in [self.reserved_character + msg for msg in data.split(self._reserved_character) if msg]]
        elif len(self._buffer) + len(data) < self._expected_size:
            self._buffer = self._buffer + data
        else:
            self._buffer = b''
    else:
        try:
            data = data[data.index(self._reserved_character):]
            [self.on_message(chunks[:self._fixed_size]) for chunks in [self._reserved_character + msg for msg in data.split(self._reserved_character) if msg]]
        except Exception, exc:
            log.msg("Warning: Maybe there is no delimiter {delim} for the new message. Error: {err}".format(delim=self._reserved_character, err=str(exc)))
Now I am in need of evolving the protocol to take into consideration the fact that the message may or may not carry optional fields (thus there isn't a fixed message length anymore). I modeled (a meaningful part of) the message parser with construct in the following way:
def on_message(self, msg):
    return Struct(HEADER,
        Bytes(HEADER_RAW, 3),
        BitStruct(OPTIONAL_HEADER_STRUCT,
            Nibble(APPLICATION_SELECTOR),
            Flag(OPTIONAL_HEADER_FLAG),
            Padding(3)
        ),
        If(lambda ctx: ctx.optional_header_struct[OPTIONAL_HEADER_FLAG],
            Embed(Struct(None,
                Byte(BATTERY_CHARGE),
                Bytes(OPTIONAL_HEADER, 3)
            ))
        )
    ).parse(msg)
So right now I need to change the buffering logic to pass the right chunk size to the Struct. I would like to avoid sizing up the data passed to the Struct in the rawDataReceived method, considering that the rules for what is a possible candidate message are already known to the construct object.
Is there any way to push the buffering logic to the construct object?
Edit
I was able to partially achieve the aim of pushing the buffering logic inside, by simply making use of Macros and Adapters:
MY_PROTOCOL = Struct("whatever",
    Anchor("begin"),
    RepeatUntil(lambda obj, ctx: obj == RESERVED_CHAR, Field("garbage", 1)),
    NoneOf(Embed(HEADER_SECTION), [RESERVED_CHAR]),
    Anchor("end"),
    Value("size", lambda ctx: ctx.end - ctx.begin)
)
This greatly simplifies the caller code (which is no longer in rawDataReceived thanks to Glyph's suggestion):
def dataReceived(self, data):
    log.msg('Received data: {}'.format(bytes_to_hex(data)))
    self._buffer += data
    try:
        container = MY_PROTOCOL.parse(self._buffer)
        self._buffer = self._buffer[container.size:]
        d, self.d = self.d, self._create_new_transmission_deferred()
        d.callback(container)
    except ValidationError, err:
        self._cb_error("A validation error occurred. Discarding the rest of the message. {}".format(err))
        self._buffer = b''
    except FieldError, err:  # Incomplete message. We simply keep on buffering and retry
        if len(self._buffer) >= MyMessageReceiver.MAX_GARBAGE_SIZE:
            self._cb_error("Buffer overflown. No delimiter found in the stream")
Unfortunately this solution covers the requirements only partially, since I could not find a way to get construct to tell me the stream index that produced the error, and therefore I am obliged to drop the entire buffer, which is not ideal.
To get the stream position at which an error occurs, you'll need to use Anchor and write your own version of NoneOf. Assuming HEADER_SECTION is another Construct, replace the NoneOf like so:
SpecialNoneOf(Struct('example', Anchor('position'), HEADER_SECTION), [RESERVED_CHAR])
SpecialNoneOf needs to subclass from Adapter and combine __init__ and _validate from NoneOf with _encode and _decode from Validator. In _decode, replace
raise ValidationError("invalid object", obj)
with
raise ValidationError("invalid object", obj.header_section + " at " + obj.position)
Replace header_section with the name of the HEADER_SECTION Construct. You will have to change the structure of the resulting container or figure out a different way to use Embed to make this method work.
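A rough sketch of what that adapter might look like, assuming the construct 2.x API already used in the question (Adapter and ValidationError); the attribute names header_section and position are illustrative and would have to match the names of your HEADER_SECTION construct and the Anchor above:
from construct import Adapter, ValidationError

class SpecialNoneOf(Adapter):
    def __init__(self, subcon, invalids):
        Adapter.__init__(self, subcon)
        self.invalids = invalids

    def _validate(self, obj, context):
        # compare the embedded header value, not the whole container
        return obj.header_section not in self.invalids

    def _decode(self, obj, context):
        if not self._validate(obj, context):
            raise ValidationError("invalid object",
                                  "%r at %d" % (obj.header_section, obj.position))
        return obj

    def _encode(self, obj, context):
        return obj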

Unpickling mid-stream (python)

I am writing scripts to process (very large) files by repeatedly unpickling objects until EOF. I would like to partition the file and have separate processes (in the cloud) unpickle and process separate parts.
However my partitioner is not intelligent, it does not know about the boundaries between pickled objects in the file (since those boundaries depend on the object types being pickled, etc.).
Is there a way to scan a file for a "start pickled object" sentinel? The naive way would be to attempt unpickling at successive byte offsets until an object is successfully unpickled, but that yields unexpected errors. It seems that for certain combinations of input, the unpickler falls out of sync and returns nothing for the rest of the file (see code below).
import cPickle
import os

def stream_unpickle(file_obj):
    while True:
        start_pos = file_obj.tell()
        try:
            yield cPickle.load(file_obj)
        except (EOFError, KeyboardInterrupt):
            break
        except (cPickle.UnpicklingError, ValueError, KeyError, TypeError, ImportError):
            file_obj.seek(start_pos + 1, os.SEEK_SET)
if __name__ == '__main__':
    import random
    from StringIO import StringIO
    # create some data
    sio = StringIO()
    [cPickle.dump(random.random(), sio, cPickle.HIGHEST_PROTOCOL) for _ in xrange(1000)]
    sio.flush()
    # read from subsequent offsets and find discontinuous jumps in object count
    size = sio.tell()
    last_count = None
    for step in xrange(size):
        sio.seek(step, os.SEEK_SET)
        count = sum(1 for _ in stream_unpickle(sio))
        if last_count is None or count == last_count - 1:
            last_count = count
        elif count != last_count:
            # if successful, these should never print (but they do...)
            print '%d elements read from byte %d' % (count, step)
            print '(%d elements read from byte %d)' % (last_count, step - 1)
            last_count = count
The pickletools module has a dis function that shows the opcodes. It shows that there is a STOP opcode that you may scan for:
>>> import pickle, pickletools, StringIO
>>> s = StringIO.StringIO()
>>> pickle.dump('abc', s)
>>> p = s.getvalue()
>>> pickletools.dis(p)
0: S STRING 'abc'
7: p PUT 0
10: . STOP
highest protocol among opcodes = 0
Note that using the STOP opcode is a bit tricky because the codes are of variable length, but it may serve as a useful hint about where the cutoffs are.
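For instance, a naive sketch of that idea: treat every '.' byte as a candidate STOP and confirm it with a trial load. This assumes the whole stream fits in memory and will also hit '.' bytes that are merely part of an opcode argument, which is why each candidate is verified:
import cPickle

def candidate_boundaries(data):
    """Yield offsets just past each '.' that actually terminates a pickle."""
    start = 0
    pos = data.find('.')
    while pos != -1:
        try:
            cPickle.loads(data[start:pos + 1])
        except Exception:
            pass                 # '.' was inside an argument, keep scanning
        else:
            yield pos + 1        # a real object boundary
            start = pos + 1
        pos = data.find('.', pos + 1)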
If you control the pickling step on the other end, then you can improve the situation by adding your own unambiguous alternative separator:
>>> sep = '\xDE\xAD\xBE\xEF'
>>> s = StringIO.StringIO()
>>> pickle.dump('abc', s)
>>> s.write(sep)
>>> pickle.dump([10, 20], s)
>>> s.write(sep)
>>> pickle.dump('def', s)
>>> s.write(sep)
>>> pickle.dump([30, 40], s)
>>> p = s.getvalue()
Before unpacking, split into separate pickles using the known separator:
>>> for pick in p.split(sep):
print pickle.loads(pick)
abc
[10, 20]
def
[30, 40]
In the pickled file, some opcodes have an argument -- a data value that follows the opcode. The data values vary in length, and can contain bytes identical to opcodes. Therefore, if you start reading the file from an arbitrary position, you have no way of knowing if you are looking at an opcode or in the middle of an argument. You must read the file from beginning and parse the opcodes.
I cooked up this function that skips one pickle from a file, i.e. reads it and parses the opcodes, but does not construct the objects. It seems slightly faster than cPickle.loads on some files I have. You could rewrite this in C for more speed (after testing this properly).
Then, you can make one pass over the whole file to get the seek position of each pickle.
from pickletools import code2op, UP_TO_NEWLINE, TAKEN_FROM_ARGUMENT1, TAKEN_FROM_ARGUMENT4
from marshal import loads as mloads

def skip_pickle(f):
    """Skip one pickle from file.

    'f' is a file-like object containing the pickle.
    """
    while True:
        code = f.read(1)
        if not code:
            raise EOFError
        opcode = code2op[code]
        if opcode.arg is not None:
            n = opcode.arg.n
            if n > 0:
                f.read(n)
            elif n == UP_TO_NEWLINE:
                f.readline()
            elif n == TAKEN_FROM_ARGUMENT1:
                n = ord(f.read(1))
                f.read(n)
            elif n == TAKEN_FROM_ARGUMENT4:
                n = mloads('i' + f.read(4))
                f.read(n)
        if code == '.':
            break
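For example, the single pass over the file mentioned above might look like this (a sketch built on skip_pickle; the resulting offsets are where each pickled object starts):
def index_pickles(f):
    """Return the start offset of every pickle in the file-like object f."""
    offsets = []
    while True:
        pos = f.tell()
        try:
            skip_pickle(f)
        except EOFError:
            break
        offsets.append(pos)
    return offsets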
Sorry to answer my own question, and thanks to #RaymondHettinger for the idea of adding sentinels.
Here's what worked for me. I created readers and writers that use a sentinel '#S' followed by a data block length at the beginning of each 'record'. The writer has to take care to find any occurrences of '#' in the data being written and double them (into '##'). The reader then uses a look-behind regex to find sentinels, distinct from any matching values that might be in the original stream, and also verify the number of bytes between this sentinel and the subsequent one.
RecordWriter is a context manager (so multiple calls to write() can be encapsulated into a single record if needed). RecordReader is a generator.
Not sure how this is on performance. Any faster/elegant-er solutions are welcome.
import os
import re
import cPickle
from functools import partial
from cStringIO import StringIO

SENTINEL = '#S'

# when scanning look for #S, but NOT ##S
sentinel_pattern = '(?<!#)#S'  # uses negative look-behind
sentinel_re = re.compile(sentinel_pattern)
find_sentinel = sentinel_re.search

# when writing replace single # with double ##
write_pattern = '#'
write_re = re.compile(write_pattern)
fix_write = partial(write_re.sub, '##')

# when reading, replace double ## with single #
read_pattern = '##'
read_re = re.compile(read_pattern)
fix_read = partial(read_re.sub, '#')

class RecordWriter(object):
    def __init__(self, stream):
        self._stream = stream
        self._write_buffer = None

    def __enter__(self):
        self._write_buffer = StringIO()
        return self

    def __exit__(self, et, ex, tb):
        if self._write_buffer.tell():
            self._stream.write(SENTINEL)  # start
            cPickle.dump(self._write_buffer.tell(), self._stream, cPickle.HIGHEST_PROTOCOL)  # byte length of user's original data
            self._stream.write(fix_write(self._write_buffer.getvalue()))
        self._write_buffer = None
        return False

    def write(self, data):
        if not self._write_buffer:
            raise ValueError("Must use StreamWriter as a context manager")
        self._write_buffer.write(data)

class BadBlock(Exception): pass

def verify_length(block):
    fobj = StringIO(block)
    try:
        stated_length = cPickle.load(fobj)
    except (ValueError, IndexError, cPickle.UnpicklingError):
        raise BadBlock
    data = fobj.read()
    if len(data) != stated_length:
        raise BadBlock
    return data

def RecordReader(stream):
    ' Read one record '
    accum = StringIO()
    seen_sentinel = False
    data = ''
    while True:
        m = find_sentinel(data)
        if not m:
            if seen_sentinel:
                accum.write(data)
            data = stream.read(80)
            if not data:
                if accum.tell():
                    try: yield verify_length(fix_read(accum.getvalue()))
                    except BadBlock: pass
                return
        else:
            if seen_sentinel:
                accum.write(data[:m.start()])
                try: yield verify_length(fix_read(accum.getvalue()))
                except BadBlock: pass
                accum = StringIO()
            else:
                seen_sentinel = True
            data = data[m.end():]  # toss

if __name__ == '__main__':
    import random
    stream = StringIO()
    data = [str(random.random()) for _ in xrange(3)]
    # test with a string containing sentinel and almost-sentinel
    data.append('abc12#jeoht38#SoSooihetS#')
    count = len(data)
    for i in data:
        with RecordWriter(stream) as r:
            r.write(i)
    size = stream.tell()
    start_pos = random.random() * size
    stream.seek(start_pos, os.SEEK_SET)
    read_data = [s for s in RecordReader(stream)]
    print 'Original data: ', data
    print 'After seeking to %d, RecordReader returned: %s' % (start_pos, read_data)
