Python script to record online live streaming videos

I am developing a script to download online live streaming videos.
My Script:
print "Recording video..."
response = urllib2.urlopen("streaming online video url")
filename = time.strftime("%Y%m%d%H%M%S",time.localtime())+".avi"
f = open(filename, 'wb')
video_file_size_start = 0
video_file_size_end = 1048576 * 7 # end in 7 mb
block_size = 1024
while True:
try:
buffer = response.read(block_size)
if not buffer:
break
video_file_size_start += len(buffer)
if video_file_size_start > video_file_size_end:
break
f.write(buffer)
except Exception, e:
logger.exception(e)
f.close()
The above script works fine for downloading 7 MB of video from live streaming content and storing it in *.avi files.
However, I would like to download just 10 seconds of video regardless of the file size and store it in an .avi file.
I have tried different possibilities, but without success.
Could anyone please share your knowledge here to fix my issue?
Thanks in advance.

I don't think there is any way of doing that without constantly analysing the video, which would be way too costly. So you could take a guess at how many MB you need and, once done, check whether it's long enough. If it's too long, just cut it. Instead of guessing, you could also build up some statistics on how much you need to retrieve. You could also replace the while True with:
start_time_in_seconds = time.time()
time_limit = 10
while time.time() - start_time_in_seconds < time_limit:
    ...
This should give you at least 10 seconds of video, unless connecting takes too much time (then you get less than 10 seconds) or the server sends more for buffering (but that's unlikely for live streams).
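As a minimal sketch (assuming the response, f, and block_size variables from the question's script), the time check slots into the download loop like this:
import time

start_time_in_seconds = time.time()
time_limit = 10  # seconds of wall-clock recording
while time.time() - start_time_in_seconds < time_limit:
    buffer = response.read(block_size)
    if not buffer:  # stream ended early
        break
    f.write(buffer)
f.close()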

You can use the 'Content-Length' header to retrieve the video file size, if it is present. Note that the header value is a string, so convert it before comparing against byte counts:
video_file_size_end = int(response.info().getheader('Content-Length'))
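Live streams often omit this header, so guard against it being missing; a small sketch (the 7 MB fallback is carried over from the question):
content_length = response.info().getheader('Content-Length')
if content_length is not None:
    video_file_size_end = int(content_length)
else:
    video_file_size_end = 1048576 * 7  # no header: fall back to the 7 MB cap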

response.read() does not work; response.iter_content() seems to do the trick.
import time
import requests

print("Recording video...")
filename = time.strftime("/tmp/" + "%Y%m%d%H%M%S", time.localtime()) + ".avi"
file_handle = open(filename, 'wb')
chunk_size = 1024
start_time_in_seconds = time.time()
time_limit = 10  # time in seconds, for recording
time_elapsed = 0
url = "http://demo.codesamplez.com/html5/video/sample"
with requests.Session() as session:
    response = session.get(url, stream=True)
    for chunk in response.iter_content(chunk_size=chunk_size):
        if time_elapsed > time_limit:
            break
        # to print time elapsed
        if int(time.time() - start_time_in_seconds) - time_elapsed > 0:
            time_elapsed = int(time.time() - start_time_in_seconds)
            print(time_elapsed, end='\r', flush=True)
        if chunk:
            file_handle.write(chunk)
file_handle.close()

Related

Using Python to stream-download a file with a server limit

I am trying to download a file from a server using Python. Sometimes the file is very large, and I would like to have a progress bar. One way to do this that I can come up with is to download in a stream, so that I can print the progress. Currently I have tried the standard urlopen, urlretrieve, and the requests module (with stream on).
Obviously, urlopen cannot download a file as a stream; the requests module supports this, but the server has a limit on the number of files I can download at one time (its limit is 1). So every time I try to use requests, it only gets the web page telling me to wait. Is there any other way to do this?
I have very recently downloaded many types of media with this function:
import sys
import requests
import time

def download_resource(domain, url, file_name=None, download=True):
    cookies = {}
    s = requests.Session()
    # note: s.config['keep_alive'] was only valid in very old requests versions;
    # sessions keep connections alive by default
    #add your own cookies here, I have a specific function I call
    #for my application but yours is different
    r = s.get(url, cookies=cookies, stream=True)
    if not r.ok:
        print "error in downloading"
        return -1
    file_size = int(r.headers['content-length'])
    if not file_name:
        try:
            temp = r.headers['content-disposition']
        except Exception as e:
            #failing download: no content-disposition header
            return -1
        else:
            if not temp:
                return -1
            else:
                file_name = temp.split("filename=")[-1]
        # return_obj["filename"] = file_name  # return_obj came from elsewhere in the original application
    #print "File size:", file_size
    print "Downloading:", file_name
    if download:
        with open(file_name, "wb") as fh:
            count = 1
            chunk_size = 1048576
            start_time = time.time()
            try:
                for block in r.iter_content(chunk_size):
                    total_time = time.time() - start_time
                    percent = count * chunk_size / float(file_size) * 100.0
                    fraction = int(percent / 5)
                    download_speed = 1.0 / total_time
                    sys.stdout.write('\r')
                    sys.stdout.write("[%-20s] %d%% %3.2f MB/s " % ('=' * fraction, percent, download_speed))
                    sys.stdout.flush()
                    if not block:
                        break
                    fh.write(block)
                    count += 1
                    start_time = time.time()
            except Exception as e:
                print e
            finally:
                #close up the stream
                r.close()
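A hypothetical invocation (the URL is made up; the domain argument is accepted but unused by the function body above):
download_resource("example.com", "http://example.com/files/video.mp4")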

How to measure download speed and progress using requests?

I am using requests to download files, but for large files I need to check the size of the file on disk every time, because I can't display the progress as a percentage, and I would also like to know the download speed. How can I go about doing it? Here's my code:
import requests
import sys
import time
import os

def downloadFile(url, directory):
    localFilename = url.split('/')[-1]
    r = requests.get(url, stream=True)
    start = time.clock()
    f = open(directory + '/' + localFilename, 'wb')
    for chunk in r.iter_content(chunk_size=512 * 1024):
        if chunk:
            f.write(chunk)
            f.flush()
            os.fsync(f.fileno())
    f.close()
    return (time.clock() - start)

def main():
    if len(sys.argv) > 1:
        url = sys.argv[1]
    else:
        url = raw_input("Enter the URL : ")
    directory = raw_input("Where would you want to save the file ?")
    time_elapsed = downloadFile(url, directory)
    print "Download complete..."
    print "Time Elapsed: " + time_elapsed

if __name__ == "__main__":
    main()
I think one way to do it would be to read the file every time in the for loop and calculate the percentage of progress based on the Content-Length header. But that would again be an issue for large files (around 500 MB). Is there any other way to do it?
See here: Python progress bar and downloads
I think the code would be something like this; it should show the average speed since the start, in bytes per second:
import requests
import sys
import time

def downloadFile(url, directory):
    localFilename = url.split('/')[-1]
    with open(directory + '/' + localFilename, 'wb') as f:
        start = time.clock()
        r = requests.get(url, stream=True)
        total_length = r.headers.get('content-length')
        dl = 0
        if total_length is None:  # no content length header
            f.write(r.content)
        else:
            total_length = int(total_length)  # header value is a string
            for chunk in r.iter_content(1024):
                dl += len(chunk)
                f.write(chunk)
                done = int(50 * dl / total_length)
                sys.stdout.write("\r[%s%s] %s bps" % ('=' * done, ' ' * (50 - done), dl // (time.clock() - start)))
    print ''
    return (time.clock() - start)

def main():
    if len(sys.argv) > 1:
        url = sys.argv[1]
    else:
        url = raw_input("Enter the URL : ")
    directory = raw_input("Where would you want to save the file ?")
    time_elapsed = downloadFile(url, directory)
    print "Download complete..."
    print "Time Elapsed: " + str(time_elapsed)

if __name__ == "__main__":
    main()
An improved version of the accepted answer for Python 3, using io.BytesIO (write to memory), with the result in Mbps and support for ipv4/ipv6, size, and port arguments.
import sys, time, io, requests

def speed_test(size=5, ipv="ipv4", port=80):
    if size == 1024:
        size = "1GB"
    else:
        size = f"{size}MB"
    url = f"http://{ipv}.download.thinkbroadband.com:{port}/{size}.zip"
    with io.BytesIO() as f:
        start = time.perf_counter()
        r = requests.get(url, stream=True)
        total_length = r.headers.get('content-length')
        dl = 0
        if total_length is None:  # no content length header
            f.write(r.content)
        else:
            for chunk in r.iter_content(1024):
                dl += len(chunk)
                f.write(chunk)
                done = int(30 * dl / int(total_length))
                sys.stdout.write("\r[%s%s] %s Mbps" % ('=' * done, ' ' * (30 - done), dl // (time.perf_counter() - start) / 100000))
    print(f"\n{size} = {(time.perf_counter() - start):.2f} seconds")
Usage Examples:
speed_test()
speed_test(10)
speed_test(50, "ipv6")
speed_test(1024, port=8080)
Output Sample:
[==============================] 61.34037 Mbps
100MB = 17.10 seconds
Available Options:
size: 5, 10, 20, 50, 100, 200, 512, 1024
ipv: ipv4, ipv6
port: 80, 81, 8080
Updated on 2022-10-11:
time.perf_counter() replaced time.clock(), which has been deprecated since Python 3.3 (kudos to shiro).
I had a problem downloading a big file from a specific slow server:
no Content-Length header,
big file (42 GB),
no compression,
slow server (<1 MB/s).
Being this big, I also had a problem with memory usage during the request. Requests doesn't write output to a file the way urllib does; it looks like it keeps the whole response in memory.
With no Content-Length header, the accepted answer can't monitor progress.
So I wrote this basic method to monitor speed during the CSV download, following just the "requests" documentation.
It needs a fname (complete output path) and a link (http or https), and you can specify custom headers.
import time
import requests

BLOCK = 5 * 1024 * 1024  # monitor in 5 MB blocks

try:
    with open(fname, 'wb') as f:
        r = requests.get(link, headers=headers, stream=True)
        ## This is because the official documentation suggests it,
        ## saying it's more reliable than cycling directly on iter_lines, so you don't lose data
        lines = r.iter_lines()
        ## Init the base vars, for monitor and block management
        ## obj is a byte object, because iter_lines returns byte objects
        tsize = 0; obj = bytearray(); t0 = time.time(); i = 0
        for line in lines:
            ## calculate the line size, in bytes, and add to the byte object
            tsize += len(line)
            obj.extend(line)
            ## When condition reached,
            if tsize > BLOCK:
                ## Increment the block number
                i += 1
                ## Calculate the speed.. this is in MB/s,
                ## but you can easily change to KB/s, or Blocks/s
                t1 = time.time()
                t = t1 - t0
                speed = round(5 / t, 2)
                ## Write the block to the file.
                f.write(obj)
                ## Write stats
                print('got', i * 5, 'MB ', 'block', i, ' #', speed, 'MB/s')
                ## Reinit all the base vars, for a new block
                obj = bytearray(); tsize = 0; t0 = time.time()
        ## Write the last block part to the file.
        f.write(obj)
except Exception as e:
    print("Error: ", e, 0)

How to perform time limited response download with python requests?

When downloading a large file with python, I want to put a time limit not only for the connection process, but also for the download.
I am trying with the following python code:
import requests
r = requests.get('http://ipv4.download.thinkbroadband.com/1GB.zip', timeout = 0.5, prefetch = False)
print r.headers['content-length']
print len(r.raw.read())
This does not work (the download is not time limited), as correctly noted in the docs: https://requests.readthedocs.org/en/latest/user/quickstart/#timeouts
This would be great if it was possible:
r.raw.read(timeout = 10)
The question is, how to put a time limit to the download?
And the answer is: do not use requests, as it is blocking. Use non-blocking network I/O, for example eventlet:
import eventlet
from eventlet.green import urllib2
from eventlet.timeout import Timeout

url5 = 'http://ipv4.download.thinkbroadband.com/5MB.zip'
url10 = 'http://ipv4.download.thinkbroadband.com/10MB.zip'
urls = [url5, url5, url10, url10, url10, url5, url5]

def fetch(url):
    response = bytearray()
    with Timeout(60, False):
        response = urllib2.urlopen(url).read()
    return url, len(response)

pool = eventlet.GreenPool()
for url, length in pool.imap(fetch, urls):
    if not length:
        print "%s: timeout!" % (url)
    else:
        print "%s: %s" % (url, length)
Produces expected results:
http://ipv4.download.thinkbroadband.com/5MB.zip: 5242880
http://ipv4.download.thinkbroadband.com/5MB.zip: 5242880
http://ipv4.download.thinkbroadband.com/10MB.zip: timeout!
http://ipv4.download.thinkbroadband.com/10MB.zip: timeout!
http://ipv4.download.thinkbroadband.com/10MB.zip: timeout!
http://ipv4.download.thinkbroadband.com/5MB.zip: 5242880
http://ipv4.download.thinkbroadband.com/5MB.zip: 5242880
When using Requests' prefetch=False parameter, you get to pull in arbitrary-sized chunks of the response at a time (rather than all at once).
What you'll need to do is tell Requests not to preload the entire request, and keep track of how much time you've spent reading so far while fetching small chunks at a time. You can fetch a chunk using r.raw.read(CHUNK_SIZE). Overall, the code will look something like this:
import requests
import time

CHUNK_SIZE = 2**12  # Bytes
TIME_EXPIRE = time.time() + 5  # Seconds

r = requests.get('http://ipv4.download.thinkbroadband.com/1GB.zip', prefetch=False)

data = ''
buffer = r.raw.read(CHUNK_SIZE)
while buffer:
    data += buffer
    buffer = r.raw.read(CHUNK_SIZE)
    if TIME_EXPIRE < time.time():
        # Quit after 5 seconds.
        data += buffer
        break

r.raw.release_conn()
print "Read %s bytes out of %s expected." % (len(data), r.headers['content-length'])
Note that this might sometimes use a bit more than the 5 seconds allotted as the final r.raw.read(...) could lag an arbitrary amount of time. But at least it doesn't depend on multithreading or socket timeouts.
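In current versions of requests, prefetch=False has been replaced by stream=True; a rough equivalent of the above using iter_content (a sketch, not the original answer's code) would be:
import requests
import time

CHUNK_SIZE = 2**12  # Bytes
deadline = time.time() + 5  # Seconds

data = b''
r = requests.get('http://ipv4.download.thinkbroadband.com/1GB.zip', stream=True)
for chunk in r.iter_content(CHUNK_SIZE):
    data += chunk
    if time.time() > deadline:
        # Quit after 5 seconds.
        break
r.close()
print("Read %s bytes out of %s expected." % (len(data), r.headers['content-length']))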
Run the download in a thread, which you can then abandon if it does not finish on time.
import requests
import threading

URL = 'http://ipv4.download.thinkbroadband.com/1GB.zip'
TIMEOUT = 0.5

def download(return_value):
    return_value.append(requests.get(URL))

return_value = []
download_thread = threading.Thread(target=download, args=(return_value,))
download_thread.start()
download_thread.join(TIMEOUT)

if download_thread.is_alive():
    print 'The download was not finished on time...'
else:
    print return_value[0].headers['content-length']
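Note that join(TIMEOUT) only stops waiting; the thread keeps downloading in the background. Marking it as a daemon (an addition, not part of the original answer) at least lets the interpreter exit without waiting for it:
download_thread = threading.Thread(target=download, args=(return_value,))
download_thread.daemon = True  # do not keep the process alive for an abandoned download
download_thread.start()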

Cancel slow download in python

I am downloading files over HTTP and displaying the progress using urllib and the following code, which works fine:
import sys
from urllib import urlretrieve

def dlProgress(count, blockSize, totalSize):
    percent = int(count * blockSize * 100 / totalSize)
    sys.stdout.write("\r" + "progress" + "...%d%%" % percent)
    sys.stdout.flush()

urlretrieve('http://example.com/file.zip', '/tmp/localfile', reporthook=dlProgress)
Now I would also like to restart the download if it is going too slow (say less than 1MB in 15 seconds). How can I achieve this?
This should work.
It calculates the actual download rate and aborts if it is too low.
import sys
from urllib import urlretrieve
import time

url = "http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tgz"  # 14,135,620 bytes
startTime = time.time()

class TooSlowException(Exception):
    pass

def convertBToMb(bytes):
    """converts Bytes to Megabytes"""
    bytes = float(bytes)
    megabytes = bytes / 1048576
    return megabytes

def dlProgress(count, blockSize, totalSize):
    global startTime
    alreadyLoaded = count * blockSize
    timePassed = time.time() - startTime
    transferRate = convertBToMb(alreadyLoaded) / timePassed  # mbytes per second
    transferRate *= 60  # mbytes per minute
    percent = int(alreadyLoaded * 100 / totalSize)
    sys.stdout.write("\r" + "progress" + "...%d%%" % percent)
    sys.stdout.flush()
    if transferRate < 4 and timePassed > 2:  # download will be slow at the beginning, hence wait 2 seconds
        print "\ndownload too slow! retrying..."
        time.sleep(1)  # let's not hammer the server
        raise TooSlowException

def main():
    try:
        urlretrieve(url, '/tmp/localfile', reporthook=dlProgress)
    except TooSlowException:
        global startTime
        startTime = time.time()
        main()

if __name__ == "__main__":
    main()
Something like this:
import signal
import time

class Timeout(Exception):
    pass

def try_one(func, t=3):
    def timeout_handler(signum, frame):
        raise Timeout()

    old_handler = signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(t)  # trigger alarm in t seconds
    try:
        t1 = time.clock()
        func()
        t2 = time.clock()
    except Timeout:
        print('{} timed out after {} seconds'.format(func.__name__, t))
        return None
    finally:
        signal.signal(signal.SIGALRM, old_handler)
        signal.alarm(0)
    return t2 - t1
Then call try_one with the func you want to time out and the time to wait:
try_one(downloader,15)
OR, you can do this:
import socket
socket.setdefaulttimeout(15)
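A sketch of combining that default timeout with urlretrieve and restarting on a stall (the retry policy here is an assumption, not part of the original answer):
import socket
from urllib import urlretrieve

socket.setdefaulttimeout(15)  # a socket read stalled for 15 s raises socket.timeout
try:
    urlretrieve('http://example.com/file.zip', '/tmp/localfile')
except (socket.timeout, IOError):  # urllib may wrap the timeout in an IOError
    print 'download stalled; restart it here'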
HolyMackerel! Use the tools!
import urllib2, sys, socket, time, os

def url_tester(url="http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tgz"):
    file_name = url.split('/')[-1]
    u = urllib2.urlopen(url, None, 1)  # Note the timeout to urllib2...
    file_size = int(u.info().getheaders("Content-Length")[0])
    print ("\nDownloading: {} Bytes: {:,}".format(file_name, file_size))
    with open(file_name, 'wb') as f:
        file_size_dl = 0
        block_sz = 1024 * 4
        time_outs = 0
        while True:
            try:
                buffer = u.read(block_sz)
            except socket.timeout:
                if time_outs > 3:  # file has not had activity in max seconds...
                    print "\n\n\nsorry -- try back later"
                    os.unlink(file_name)
                    raise
                else:  # start counting time outs...
                    print "\nHmmm... little issue... I'll wait a couple of seconds"
                    time.sleep(3)
                    time_outs += 1
                    continue
            if not buffer:  # end of the download
                sys.stdout.write('\rDone!' + ' ' * len(status) + '\n\n')
                sys.stdout.flush()
                break
            file_size_dl += len(buffer)
            f.write(buffer)
            status = '{:20,} Bytes [{:.2%}] received'.format(file_size_dl,
                     file_size_dl * 1.0 / file_size)
            sys.stdout.write('\r' + status)
            sys.stdout.flush()
    return file_name
This prints a status as expected. If I unplug my ethernet cable, I get:
Downloading: Python-2.7.3.tgz Bytes: 14,135,620
827,392 Bytes [5.85%] received
sorry -- try back later
If I unplug the cable, then plug it back in in less than 12 seconds, I get:
Downloading: Python-2.7.3.tgz Bytes: 14,135,620
716,800 Bytes [5.07%] received
Hmmm... little issue... I'll wait a couple of seconds
Hmmm... little issue... I'll wait a couple of seconds
Done!
The file is successfully downloaded.
You can see that urllib2 supports both timeouts and reconnects. If you disconnect and stay disconnected for 3 * 4 seconds == 12 seconds, it will time out for good and raise a fatal exception. This could be dealt with as well.

PyAudio Input overflowed

I'm trying to do real-time sound plotting in Python, and I need to get chunks from my microphone.
Using PyAudio, I try to use:
import pyaudio
import wave
import sys

chunk = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "output.wav"

p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=chunk)

print "* recording"
all = []
for i in range(0, RATE / chunk * RECORD_SECONDS):
    data = stream.read(chunk)
    all.append(data)
print "* done recording"

stream.close()
p.terminate()
Afterwards, I get the following error:
* recording
Traceback (most recent call last):
File "gg.py", line 23, in <module>
data = stream.read(chunk)
File "/usr/lib64/python2.7/site-packages/pyaudio.py", line 564, in read
return pa.read_stream(self._stream, num_frames)
IOError: [Errno Input overflowed] -9981
I can't understand this buffer. I want to use blocking IO mode, so that if chunks are not available, I wait for those chunks. But when I create a try/except segment or sleep(0.1), I hear clicks, so this is not what I want.
Please suggest the best solution for my problem.
pyaudio.Stream.read() has a keyword parameter exception_on_overflow; set this to False.
For your sample code that would look like:
import pyaudio
import wave
import sys

chunk = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "output.wav"

p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=chunk)

print "* recording"
all = []
for i in range(0, RATE / chunk * RECORD_SECONDS):
    data = stream.read(chunk, exception_on_overflow=False)
    all.append(data)
print "* done recording"

stream.close()
p.terminate()
See the PyAudio documentation for more details.
It seems like a lot of people are encountering this issue. I dug a bit into it and I think it means that between the previous call to stream.read() and this current call, data from the stream was lost (i.e. the buffer filled up faster than you cleared it).
From the doc for Pa_ReadStream() (the PortAudio function that stream.read() eventually ends up calling):
@return On success PaNoError will be returned, or paInputOverflowed if input data was discarded by PortAudio after the previous call and before this call.
(PaInputOverflowed then causes an IOError in the pyaudio wrapper).
If it's OK for you to not capture every single frame, then you may ignore this error. If it's absolutely critical for you to have every frame, then you'll need to find a way to increase the priority of your application. I'm not familiar enough with Python to know a pythonic way to do this, but it's worth trying a simple nice command, or changing the scheduling policy to SCHED_DEADLINE.
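If dropping the occasional buffer is acceptable, a minimal sketch of swallowing the overflow in the read loop (assuming the stream, chunk, RATE, RECORD_SECONDS, and all variables from the question):
for i in range(0, RATE / chunk * RECORD_SECONDS):
    try:
        data = stream.read(chunk)
    except IOError:
        # the buffer overflowed between reads; skip this chunk and keep going
        continue
    all.append(data)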
Edit:
One issue right now is that when IOError is thrown, you lose all the frames collected in that call. To instead ignore the overflow and just return what we have, you can apply the patch below, which will cause stream.read() to ignore output underrun and input overflow errors from PortAudio (but still throw something if a different error occurred). A better way would be to make this behaviour (throw/no throw) customizable depending on your needs.
diff --git a/src/_portaudiomodule.c b/src/_portaudiomodule.c
index a8f053d..0878e74 100644
--- a/src/_portaudiomodule.c
+++ b/src/_portaudiomodule.c
@@ -2484,15 +2484,15 @@ pa_read_stream(PyObject *self, PyObject *args)
} else {
/* clean up */
_cleanup_Stream_object(streamObject);
+
+ /* free the string buffer */
+ Py_XDECREF(rv);
+
+ PyErr_SetObject(PyExc_IOError,
+ Py_BuildValue("(s,i)",
+ Pa_GetErrorText(err), err));
+ return NULL;
}
-
- /* free the string buffer */
- Py_XDECREF(rv);
-
- PyErr_SetObject(PyExc_IOError,
- Py_BuildValue("(s,i)",
- Pa_GetErrorText(err), err));
- return NULL;
}
return rv;
I got the same error when I ran your code. I looked at the default sample rate of my default audio device, my MacBook's internal microphone, and it was 48000 Hz, not 44100 Hz.
p.get_device_info_by_index(0)['defaultSampleRate']
Out[12]: 48000.0
When I changed RATE to this value, it worked.
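A small sketch of querying the device's native rate before opening the stream (using the default input device rather than a hard-coded index):
import pyaudio

p = pyaudio.PyAudio()
RATE = int(p.get_default_input_device_info()['defaultSampleRate'])
# ...then open the stream with rate=RATE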
I was working on OS X 10.10 and got the same error while trying to get audio from the microphone of a SYBA USB card (C-Media chipset) and process it in real time with FFTs and more:
IOError: [Errno Input overflowed] -9981
The overflow was completely solved by using callback mode instead of blocking mode, as written by libbkmz (https://www.python.org/dev/peps/pep-0263/).
Based on that, the bit of the working code looked like this:
"""
Creating the audio stream from our mic
"""
rate=48000
self.chunk=2**12
width = 2
p = pyaudio.PyAudio()
# callback function to stream audio, another thread.
def callback(in_data,frame_count, time_info, status):
self.audio = numpy.fromstring(in_data,dtype=numpy.int16)
return (self.audio, pyaudio.paContinue)
#create a pyaudio object
self.inStream = p.open(format = p.get_format_from_width(width, unsigned=False),
channels=1,
rate=rate,
input=True,
frames_per_buffer=self.chunk,
stream_callback = callback)
"""
Setting up the array that will handle the timeseries of audio data from our input
"""
self.audio = numpy.empty((self.buffersize),dtype="int16")
self.inStream.start_stream()
while True:
try:
self.ANY_FUNCTION() #any function to run parallel to the audio thread, running forever, until ctrl+C is pressed.
except KeyboardInterrupt:
self.inStream.stop_stream()
self.inStream.close()
p.terminate()
print("* Killed Process")
quit()
This code creates a callback function, then creates a stream object, starts it, and then loops in any function. A separate thread streams audio, and that stream is closed when the main loop is stopped. self.audio is used in any function. I also had problems with the thread running forever if it was not terminated.
Since PyAudio runs this stream in a separate thread, which made the audio stream stable, the blocking mode might have been saturating depending on the speed or timing of the rest of the processes in the script.
Note that the chunk size is 2^12, but smaller chunks work just as well. There are other parameters I considered and played around with to make sure they all made sense:
Chunk size, larger or smaller (no effect)
Number and format of bits for the words in the buffer, signed 16-bit in this case
Signedness of variables (tried with unsigned and got saturation patterns)
Nature of the mic input, its selection as default in the system, gain, etc.
Hope that works for someone!
My other answer solved the problem in most cases. However, sometimes the error still occurs.
That was the reason I scrapped pyaudio and switched to pyalsaaudio. My Raspberry Pi now smoothly records any sound.
import alsaaudio
import numpy as np
import array

# constants
CHANNELS = 1
INFORMAT = alsaaudio.PCM_FORMAT_FLOAT_LE
RATE = 44100
FRAMESIZE = 1024

# set up audio input
recorder = alsaaudio.PCM(type=alsaaudio.PCM_CAPTURE)
recorder.setchannels(CHANNELS)
recorder.setrate(RATE)
recorder.setformat(INFORMAT)
recorder.setperiodsize(FRAMESIZE)

buffer = array.array('f')
while <some condition>:
    buffer.fromstring(recorder.read()[1])
data = np.array(buffer, dtype='f')
FORMAT = pyaudio.paInt16
Make sure to set the correct format; my internal microphone was set to 24-bit (see the Audio MIDI Setup application).
I had the same issue on a really slow Raspberry Pi, but I was able to solve it (for most cases) by using the faster array module for storing the data.
import array
import pyaudio

FORMAT = pyaudio.paInt16
CHANNELS = 1
INPUT_CHANNEL = 2
RATE = 48000
CHUNK = 512
RECORD_SECONDS = 5  # not defined in the original snippet; assumed here

p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=INPUT_CHANNEL,
                frames_per_buffer=CHUNK)

print("* recording")
try:
    data = array.array('h')
    for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        data.fromstring(stream.read(CHUNK))
finally:
    stream.stop_stream()
    stream.close()
    p.terminate()
print("* done recording")
The content of data is raw binary afterwards, but you can use numpy.array(data, dtype='i') to get a numpy array of integers.
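For instance (dtype='h' would keep the original 16-bit width; 'i' widens to 32-bit):
import numpy
samples = numpy.array(data, dtype='i')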
Instead of
chunk = 1024
use:
chunk = 4096
It worked for me on a USB microphone.
This was helpful for me:
input_ = stream.read(chunk, exception_on_overflow=False)
For me this helped: https://stackoverflow.com/a/46787874/5047984
I used multiprocessing to write the file in parallel with recording audio. This is my code:
recordAudioSamples.py
import pyaudio
import wave
import datetime
import signal
import ftplib
import sys
import os

# configuration for assos_listen
import config

# run the audio capture and send sound sample processes
# in parallel
from multiprocessing import Process

# CONFIG
CHUNK = config.chunkSize
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = config.samplingRate
RECORD_SECONDS = config.sampleLength

# HELPER FUNCTIONS

# write to ftp
def uploadFile(filename):
    print("start uploading file: " + filename)
    # connect to container
    ftp = ftplib.FTP(config.ftp_server_ip, config.username, config.password)
    # write file
    ftp.storbinary('STOR ' + filename, open(filename, 'rb'))
    # close connection
    ftp.quit()
    print("finished uploading: " + filename)

# write to sd-card
def storeFile(filename, frames):
    print("start writing file: " + filename)
    wf = wave.open(filename, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()
    print(filename + " written")

# abort the sampling process
def signal_handler(signal, frame):
    print('You pressed Ctrl+C!')
    # close stream and pyAudio
    stream.stop_stream()
    stream.close()
    p.terminate()
    sys.exit(0)

# MAIN FUNCTION
def recordAudio(p, stream):
    sampleNumber = 0
    while True:
        print("* recording")
        sampleNumber = sampleNumber + 1
        frames = []
        startDateTimeStr = datetime.datetime.now().strftime("%Y_%m_%d_%I_%M_%S_%f")
        for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
            data = stream.read(CHUNK)
            frames.append(data)
        fileName = str(config.sensorID) + "_" + startDateTimeStr + ".wav"
        # create a store process to write the file in parallel
        storeProcess = Process(target=storeFile, args=(fileName, frames))
        storeProcess.start()
        if config.upload == True:
            # since waiting for the upload to finish would take some time
            # and we do not want to have gaps in our sample
            # we start the upload process in parallel
            print("start uploading...")
            uploadProcess = Process(target=uploadFile, args=(fileName,))
            uploadProcess.start()

# ENTRYPOINT FROM CONSOLE
if __name__ == '__main__':
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    # directory to write and read files from
    os.chdir(config.storagePath)
    # abort by pressing Ctrl+C
    signal.signal(signal.SIGINT, signal_handler)
    print('\n\n--------------------------\npress Ctrl+C to stop the recording')
    # start recording
    recordAudio(p, stream)
config.py
### configuration file for assos_listen
# upload
upload = False
# config for this sensor
sensorID = "al_01"
# sampling rate & chunk size
chunkSize = 8192
samplingRate = 44100 # 44100 needed for Aves sampling
# choices=[4000, 8000, 16000, 32000, 44100] :: default 16000
# sample length in seconds
sampleLength = 10
# configuration for assos_store container
ftp_server_ip = "192.168.0.157"
username = "sensor"
password = "sensor"
# storage on assos_listen device
storagePath = "/home/pi/assos_listen_pi/storage/"
