How to correctly convert MIDI ticks to milliseconds? - python

I'm trying to convert MIDI ticks/delta time to milliseconds and have found a few helpful resources already:
MIDI Delta Time Ticks to Seconds
How to convert midi timeline into the actual timeline that should be played
MIDI Time Code spec
MTC
The problem is I don't think I'm using this information correctly.
I've tried applying the formula Nik expanded:
 1 min     60 sec    1 beat    Z clocks
------- * ------- * -------- * -------- = seconds
X beats    1 min    Y clocks      1
using the metadata from this test MIDI file:
<meta message set_tempo tempo=576923 time=0>
<meta message key_signature key='Ab' time=0>
<meta message time_signature numerator=4 denominator=4 clocks_per_click=24 notated_32nd_notes_per_beat=8 time=0>
Like so:
self.toSeconds = 60.0 * self.t[0][2].clocks_per_click / (self.t[0][0].tempo * self.t[0][2].denominator) * 10
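(For reference: set_tempo gives the tempo in microseconds per quarter note, so tempo=576923 corresponds to 60,000,000 / 576,923 ≈ 104 BPM.)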
This initially looks ok, but then it seems to drift.
Here is a basic runnable example using Mido and pygame (assuming pygame plays back correctly):
import threading
import pygame
from pygame.locals import *
from mido import MidiFile, MetaMessage

music_file = "Bee_Gees_-_Stayin_Alive-Voice.mid"

# audio setup
freq = 44100     # audio CD quality
bitsize = -16    # signed 16 bit
channels = 2     # 1 is mono, 2 is stereo
buffer = 1024    # number of samples
pygame.mixer.init(freq, bitsize, channels, buffer)
pygame.mixer.music.set_volume(0.8)


class MIDIPlayer(threading.Thread):

    def __init__(self, music_file):
        try:
            # MIDI parsing
            self.mid = MidiFile(music_file)
            self.t = self.mid.tracks
            for i, track in enumerate(self.mid.tracks):
                print('Track {}: {}'.format(i, track.name))
                for message in track:
                    if isinstance(message, MetaMessage):
                        if message.type == 'time_signature' or message.type == 'set_tempo' or message.type == 'key_signature':
                            print message
            self.t0 = self.t[0][3:len(self.t[0]) - 1]
            self.t0l = len(self.t0)
            self.toSeconds = 60.0 * self.t[0][2].clocks_per_click / (self.t[0][0].tempo * self.t[0][2].denominator) * 10
            print "self.toSeconds", self.toSeconds
            # timing setup
            self.event_id = 0
            self.now = pygame.time.get_ticks()
            self.play_music(music_file)
        except KeyboardInterrupt:
            pygame.mixer.music.fadeout(1000)
            pygame.mixer.music.stop()
            raise SystemExit

    def play_music(self, music_file):
        clock = pygame.time.Clock()
        try:
            pygame.mixer.music.load(music_file)
            print "Music file %s loaded!" % music_file
        except pygame.error:
            print "File %s not found! (%s)" % (music_file, pygame.get_error())
            return
        pygame.mixer.music.play()
        while pygame.mixer.music.get_busy():
            # check if playback has finished
            millis = pygame.time.get_ticks()
            deltaMillis = self.t0[self.event_id].time * self.toSeconds * 1000
            # print millis, deltaMillis
            if millis - self.now >= deltaMillis:
                print self.t0[self.event_id].text
                self.event_id = (self.event_id + 1) % self.t0l
                self.now = millis
            clock.tick(30)


MIDIPlayer(music_file)
What the above code should do is print the correct lyric at the correct time, based on the MIDI file's timing, yet it drifts over time.
What's the correct way of converting MIDI delta time to seconds/milliseconds?
Update
Based on CL's helpful answer I've updated the code to use ticks_per_beat from the header. Since there is a single set_tempo meta message, I am using this value throughout:
import threading
import pygame
from pygame.locals import *
from mido import MidiFile, MetaMessage

music_file = "Bee_Gees_-_Stayin_Alive-Voice.mid"

# audio setup
freq = 44100     # audio CD quality
bitsize = -16    # signed 16 bit
channels = 2     # 1 is mono, 2 is stereo
buffer = 1024    # number of samples
pygame.mixer.init(freq, bitsize, channels, buffer)
pygame.mixer.music.set_volume(0.8)


class MIDIPlayer(threading.Thread):

    def __init__(self, music_file):
        try:
            # MIDI parsing
            self.mid = MidiFile(music_file)
            self.t = self.mid.tracks
            for i, track in enumerate(self.mid.tracks):
                print('Track {}: {}'.format(i, track.name))
                for message in track:
                    # print message
                    if isinstance(message, MetaMessage):
                        if message.type == 'time_signature' or message.type == 'set_tempo' or message.type == 'key_signature' or message.type == 'ticks_per_beat':
                            print message
            self.t0 = self.t[0][3:len(self.t[0]) - 1]
            self.t0l = len(self.t0)
            self.toSeconds = 60.0 * self.t[0][2].clocks_per_click / (self.t[0][0].tempo * self.t[0][2].denominator) * 10
            print "self.toSeconds", self.toSeconds
            # append delta delays in milliseconds
            self.delays = []
            tempo = self.t[0][0].tempo
            ticks_per_beat = self.mid.ticks_per_beat
            last_event_ticks = 0
            microseconds = 0
            for event in self.t0:
                delta_ticks = event.time - last_event_ticks
                last_event_ticks = event.time
                delta_microseconds = tempo * delta_ticks / ticks_per_beat
                microseconds += delta_microseconds
                print event.text, microseconds / 1000000.0
                self.delays.append(microseconds / 1000)
            # timing setup
            self.event_id = 0
            self.now = pygame.time.get_ticks()
            self.play_music(music_file)
        except KeyboardInterrupt:
            pygame.mixer.music.fadeout(1000)
            pygame.mixer.music.stop()
            raise SystemExit

    def play_music(self, music_file):
        clock = pygame.time.Clock()
        try:
            pygame.mixer.music.load(music_file)
            print "Music file %s loaded!" % music_file
        except pygame.error:
            print "File %s not found! (%s)" % (music_file, pygame.get_error())
            return
        pygame.mixer.music.play()
        while pygame.mixer.music.get_busy():
            # check if playback has finished
            millis = pygame.time.get_ticks()
            # deltaMillis = self.t0[self.event_id].time * self.toSeconds * 1000
            deltaMillis = self.delays[self.event_id]
            # print millis, deltaMillis
            if millis - self.now >= deltaMillis:
                print self.t0[self.event_id].text
                self.event_id = (self.event_id + 1) % self.t0l
                self.now = millis
            clock.tick(30)


MIDIPlayer(music_file)
The timing of the messages I print based on the converted millisecond delays looks much better. However, after a few seconds it still drifts.
Am I correctly converting MIDI ticks to milliseconds and correctly keeping track of the elapsed milliseconds in the update while loop?
This is how the conversion is made:
self.delays = []
tempo = self.t[0][0].tempo
ticks_per_beat = self.mid.ticks_per_beat
last_event_ticks = 0
microseconds = 0
for event in self.t0:
    delta_ticks = event.time - last_event_ticks
    last_event_ticks = event.time
    delta_microseconds = tempo * delta_ticks / ticks_per_beat
    microseconds += delta_microseconds
    print event.text, microseconds / 1000000.0
    self.delays.append(microseconds / 1000)
and this is how I check whether a 'cue' has been reached as time passes:
millis = pygame.time.get_ticks()
deltaMillis = self.delays[self.event_id]
if millis - self.now >= deltaMillis:
    print self.t0[self.event_id].text
    self.event_id = (self.event_id + 1) % self.t0l
    self.now = millis
clock.tick(30)
I'm not sure whether this implementation converts MIDI delta ticks to milliseconds incorrectly, checks the millisecond-based delays incorrectly, or both.

First, you have to merge all tracks, to ensure that the tempo change events are processed properly. (This is probably easier if you convert delta times to absolute tick values first; otherwise, you'd have to recompute the delta times whenever an event is inserted between events of another track.)
Then you have to compute, for each event, the relative time to the last event, like in the following pseudocode. It is important that the computation must use relative times because the tempo could have changed at any time:
tempo = 500000  # default: 120 BPM
ticks_per_beat = ...  # from the file header

last_event_ticks = 0
microseconds = 0

for each event:
    delta_ticks = event.ticks - last_event_ticks
    last_event_ticks = event.ticks
    delta_microseconds = tempo * delta_ticks / ticks_per_beat
    microseconds += delta_microseconds
    if event is a tempo event:
        tempo = event.new_tempo
    # ... handle event ...
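For what it's worth, here is a minimal runnable sketch of that pseudocode using mido itself (assumptions: mido ≥ 1.2, which provides merge_tracks() and tick2second(); 'song.mid' is a placeholder file name):

import mido

mid = mido.MidiFile('song.mid')  # placeholder file name
tempo = 500000                   # default: 120 BPM
seconds = 0.0

# merge_tracks() interleaves all tracks while keeping delta times consistent,
# so tempo changes are applied to events from every track
for msg in mido.merge_tracks(mid.tracks):
    seconds += mido.tick2second(msg.time, mid.ticks_per_beat, tempo)
    if msg.type == 'set_tempo':
        tempo = msg.tempo        # tempo can change at any point
    elif msg.type == 'lyrics':
        print seconds, msg.text  # print each cue with its absolute time

Note that iterating over a MidiFile directly (rather than over its tracks) yields messages with time already converted to seconds, doing essentially this conversion internally.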

You might want to increase the frame rate. On my system, increasing clock.tick(30) to clock.tick(300) gives good results. You can measure this by printing how much your timing is off:
print self.t0[self.event_id].text, millis - self.now - deltaMillis
With 30 ticks per second the cues lag 20 to 30 milliseconds behind. With 300, they are at most 2 milliseconds behind. You might want to increase this even further.
Just to be safe, you should run Python with the -u switch to prevent stdout from buffering (this might be unnecessary, since lines end with a newline).
I have a hard time determining the timing, but judging from the "Ah ha ha ha"'s it seems to be correct with these changes.

Related

Sending and receiving a signal at the same time

I'm working in Python on a Raspberry Pi. I'm trying to send out a signal on a motor controller, and then receive a signal with a sensing hat after it passes through my plant (an RC filter in this case).
The important thing is that I want to generate the output and read the input as close to simultaneously as possible. I was hoping to use multiprocessing to have one process send the signal while the other reads the incoming signal. But I keep getting confused about how threads and processes work in Python.
In short: is it possible to do two different tasks with multiprocessing and then repeat those tasks (sending and reading a signal) until a condition is met, like in a while loop?
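For illustration, a minimal sketch of that pattern with two processes and a shared stop condition (the send/read bodies are hypothetical placeholders, not the MotorKit/daqhats calls used in the code below):

import multiprocessing as mp
import time

def send_signal(stop):
    while not stop.is_set():
        # hypothetical placeholder: write the next output sample here
        time.sleep(0.001)

def read_signal(stop):
    while not stop.is_set():
        # hypothetical placeholder: read the next input sample here
        time.sleep(0.001)

if __name__ == '__main__':
    stop = mp.Event()  # the shared "condition is met" flag
    workers = [mp.Process(target=send_signal, args=(stop,)),
               mp.Process(target=read_signal, args=(stop,))]
    for w in workers:
        w.start()
    time.sleep(5)      # stand-in for whatever the real stop condition is
    stop.set()
    for w in workers:
        w.join()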
(Edited with Code)
from __future__ import print_function
from PyQt5.QtWidgets import QAction
from pyqtgraph.Qt import QtGui, QtCore
from adafruit_motorkit import MotorKit
import pyqtgraph as pg
import sys
from sys import stdout
import numpy as np
from daqhats import mcc118, OptionFlags, HatIDs, HatError
from daqhats_utils import select_hat_device, enum_mask_to_string, \
    chan_list_to_mask
from decimal import *
import math
import time

getcontext().prec = 3

total_samples_read = 0
READ_ALL_AVAILABLE = -1
channelData = np.zeros(4, dtype=float)
CURSOR_BACK_2 = '\x1b[2D'
ERASE_TO_END_OF_LINE = '\x1b[0K'

# for plotting data
########################################
scan_rate = 1000  # scan rate in Hz
maxtime = 30      # seconds to run for
Datatime = np.zeros(maxtime * scan_rate, dtype=float)  # list of times when samples are taken
Data1 = np.zeros(maxtime * scan_rate, dtype=float)     # samples taken
data_index = 0               # maximum index of data points taken
dt = Decimal(1 / scan_rate)  # difference in time between indexes of Datatime
display_index = 0            # maximum index of Data being displayed on plot
#################################

# variables for Data logger
##########################
is_scanning = False
channels = [0]
channel_mask = chan_list_to_mask(channels)
num_channels = len(channels)
samples_per_channel = 0
options = OptionFlags.CONTINUOUS
######################################

startedTime = 0  # time at program start
myTime = 0       # time since program started

try:
    address = select_hat_device(HatIDs.MCC_118)
    hat = mcc118(address)
except (HatError, ValueError) as err:
    print('\n', err)


class MainWindow(pg.GraphicsWindow):

    def __init__(self, *args, **kwargs):
        super(pg.GraphicsWindow, self).__init__(*args, **kwargs)
        self.delay = 30  # ms
        self.quit = QAction("Quit", self)
        self.quit.triggered.connect(self.clean_close)
        self.timer = QtCore.QTimer()
        self.timer.setInterval(self.delay)
        self.timer.timeout.connect(self.update_plot)

    # plots data and runs calibrate between trials
    def update_plot(self):
        global display_index, Datatime, Data1
        kit.motor1.throttle = .4 + .2 * math.cos((time.time() - startedTime) * 2 * np.pi * 1)  # 1 Hz sinusoid out of motor
        if data_index < len(Data1):
            Collect_Data()
            plot.setXRange(0, 20, padding=0)
            curve.setData(Datatime[:display_index], Data1[:display_index])
            display_index += 1
        app.processEvents()

    def clean_close(self):
        self.close()


# starts data collection
def Collect_Data():
    global is_scanning
    """
    This function is executed automatically when the module is run directly.
    """
    # Store the channels in a list and convert the list to a channel mask that
    # can be passed as a parameter to the MCC 118 functions.
    try:
        # Select an MCC 118 HAT device to use.
        # actual_scan_rate = hat.a_in_scan_actual_rate(num_channels, scan_rate)
        # Configure and start the scan.
        # Since the continuous option is being used, the samples_per_channel
        # parameter is ignored if the value is less than the default internal
        # buffer size (10000 * num_channels in this case). If a larger internal
        # buffer size is desired, set the value of this parameter accordingly.
        if not is_scanning:
            hat.a_in_scan_start(channel_mask, samples_per_channel, scan_rate,
                                options)
            is_scanning = True
        try:
            read_and_display_data(hat, num_channels)
        except KeyboardInterrupt:
            # Clear the '^C' from the display.
            print(CURSOR_BACK_2, ERASE_TO_END_OF_LINE, '\n')
            print('Stopping')
            hat.a_in_scan_stop()
            hat.a_in_scan_cleanup()
    except (HatError, ValueError) as err:
        print('\n', err)


# reads Data off of Hat and adds to Data1
def read_and_display_data(hat, num_channels):
    global channelData, data_index, Datatime, Data1
    total_samples_read = 0
    read_request_size = READ_ALL_AVAILABLE
    # When doing a continuous scan, the timeout value will be ignored in the
    # call to a_in_scan_read because we will be requesting that all available
    # samples (up to the default buffer size) be returned.
    timeout = 5.0
    # Read all of the available samples (up to the size of the read_buffer which
    # is specified by the user_buffer_size). Since the read_request_size is set
    # to -1 (READ_ALL_AVAILABLE), this function returns immediately with
    # whatever samples are available (up to user_buffer_size) and the timeout
    # parameter is ignored.
    trigger = True
    while trigger == True:
        read_result = hat.a_in_scan_read(read_request_size, timeout)
        # Check for an overrun error
        if read_result.hardware_overrun:
            print('\n\nHardware overrun\n')
            break
        elif read_result.buffer_overrun:
            print('\n\nBuffer overrun\n')
            break
        samples_read_per_channel = int(len(read_result.data) / num_channels)
        total_samples_read += samples_read_per_channel
        # adds all data in buffer to data to be plotted.
        count = 0
        if samples_read_per_channel > 0:
            index = samples_read_per_channel * num_channels - num_channels
            while count < samples_read_per_channel:
                for i in range(num_channels):
                    channelData[i] = read_result.data[index + i]
                if data_index < len(Data1):
                    Data1[data_index] = channelData[0]
                    Datatime[data_index] = float(dt * Decimal(data_index))
                    data_index += 1
                count += 1
            trigger = False
        stdout.flush()


if __name__ == '__main__':
    app = QtGui.QApplication([])
    win = MainWindow()  # display window
    plot = win.addPlot(1, 0)
    curve = plot.plot()
    win.show()
    kit = MotorKit()  # implements motor driver
    kit.motor1.throttle = .4  # value 1 is 5 V and 0 is 0 V
    startedTime = time.time()
    # u = .2*math.cos(t * 2*np.pi*1)
    win.timer.start()
    sys.exit(app.exec_())

storing reaction times with event.getKeys while playing a sound with sounddevice

I coded an experiment in which participants are presented with a series of visual stimuli (stim duration: 100ms, trial duration: 500ms). Simultaneously with the onset of the visual stimuli, there is a sound playing for 100 ms.
Some of the visual stimuli are targets and participants should press spacebar when they detect the target.
I want to know participants' reaction times to the target, so I store, using event.getKeys, the global time when the spacebar was pressed. I store a global time so I can compare the time of the onset of the trial with the time when the spacebar was pressed. I do that because my inter-trial interval is short and it can happen that participants respond to the target during the following trial.
The code seems to work when I comment out sd.play of the sound, but as soon as the sound is played, the reaction times seem off and the response is always stored in the trial following the target trial (even though I know I pressed spacebar during the target trial).
Did anyone encounter this problem before?
Below is the code for the procedure:
def response_check(key):
    """
    Checks if a key was pressed.
    Keyword arguments:
    key -- containing either a keypress and a time or nothing (list)
    return:
    time -- nan if not pressed or time of press if pressed
    """
    if len(key) == 0:
        pressed = 0
    elif 'space' in key[0]:
        pressed = 1
    if pressed == 1:
        time = key[0][1]
    elif pressed == 0:
        time = 'nan'
    return str(time), pressed
for t in range(n_trials):  # n_trials is the total amount of trials
    show_target_crosses(pauses, t, trial_paradigm[t], hi_targets, low_targets)  # show target
    l_trial_start = globalClock.getTime()
    check4esc()  # check for esc
    # set stimuli according to condition
    standing = visual.Rect(win=win, name='up_cross_hor', width=(dimentions[1]),
        height=(dimentions[0]), ori=0, pos=(0, 0), lineWidth=1,
        lineColor=colors[all_crosses[trial_paradigm[t]][t]],
        lineColorSpace='rgb', fillColor=colors[all_crosses[trial_paradigm[t]][t]],
        fillColorSpace='rgb', opacity=1, depth=0.0, interpolate=True)
    laying = visual.Rect(win=win, name='up_cross_hor', width=(dimentions[0]),
        height=(dimentions[1]), ori=0, pos=(0, position[all_crosses[trial_paradigm[t]][t]]), lineWidth=1,
        lineColor=colors[all_crosses[trial_paradigm[t]][t]],
        lineColorSpace='rgb', fillColor=colors[all_crosses[trial_paradigm[t]][t]],
        fillColorSpace='rgb', opacity=1, depth=0.0, interpolate=True)
    sd.play(all_sounds[all_paradigms[trial_paradigm[t]][t]], fs)  # Play sound
    if first_seven[t] == 0:
        if all_responses[trial_paradigm[t]][t] == 0:
            trigger(trig_list[trial_paradigm[t]][all_paradigms[trial_paradigm[t]][t]], 0.01)  # send sound trigger
        elif all_responses[trial_paradigm[t]][t] == 1:
            trigger(trig_list_targets[trial_paradigm[t]][all_paradigms[trial_paradigm[t]][t]], 0.01)
    core.wait(0.06)  # adjust diode to sound delay
    standing.draw()  # vertical bar
    laying.draw()    # horizontal bar
    whiteOn.draw()   # square
    win.flip()       # show cross and white square for fotodiode
    core.wait(0.1)   # show cross 100 ms
    win.flip()       # turn visual stuff off
    core.wait(0.032) # adjust ITI
    l_fp = int(ok_data[0])
    l_block_nr = blocks[t] + 1
    l_trial_nr = (range(367)*n_blocks)[t] + 1
    l_condition = trial_paradigm[t]
    l_sound = all_sounds_names[all_paradigms[trial_paradigm[t]][t]]
    if first_seven[t] == 0:
        if all_responses[trial_paradigm[t]][t] == 0:
            l_trigger = trig_list[trial_paradigm[t]][all_paradigms[trial_paradigm[t]][t]]  # send sound trigger
        elif all_responses[trial_paradigm[t]][t] == 1:
            l_trigger = trig_list_targets[trial_paradigm[t]][all_paradigms[trial_paradigm[t]][t]]
    elif first_seven[t] == 1:
        l_trigger = 999
    l_target = all_responses[trial_paradigm[t]][t]
    l_cross_condition = all_crosses[trial_paradigm[t]][t]
    key = event.getKeys(keyList = ['space'], timeStamped = globalClock)
    l_response_time = response_check(key)[0]
    # Save data to file
    # 'fp\tblock_nr\ttrial_nr\tcondition\tsound\ttrigger\ttarget\tcross_cond\ttrial_start\tresponse_time\n'
    dataFile.write('%i\t%i\t%i\t%i\t%s\t%i\t%i\t%i\t%f\t%s\n' % (
        l_fp, l_block_nr, l_trial_nr, l_condition, l_sound, l_trigger,
        l_target, l_cross_condition, l_trial_start, l_response_time))
    paus(t, pauses, blocks, trig = 192)  # check for pauses
=========== EDIT ============
Below I paste the MCVE version of the whole experiment:
from psychopy import visual
from psychopy import core, gui, data, event, parallel
import sounddevice as sd
import time, random, math, sys
import numpy as np

# Functions --------------------------------------------------------------------
def response_check(key):
    """
    Checks if a key was pressed.
    Keyword arguments:
    key -- containing either a keypress and a time or nothing (list)
    return:
    time -- nan if not pressed or time of press if pressed
    """
    if len(key) == 0:
        pressed = 0
    elif 'space' in key[0]:
        pressed = 1
    if pressed == 1:
        time = key[0][1]
    elif pressed == 0:
        time = 'nan'
    return str(time), pressed

def create_sinusoid(freq = 1000, phase = 0, fs = 48000, dur = 1):
    '''Create a sinusoid of specified length with amplitude -1 to 1. Use
    set_gain() and fade() to set amplitude and fade-in-out.
    Keyword arguments:
    frequency -- frequency in Hz (float)
    phase -- phase in radians (float)
    fs -- sampling frequency (int)
    duration -- duration of signal in seconds (float).
    Return:
    sinusoid -- monosignal of sinusoid (1xn numpy array)
    '''
    t = np.arange(0, dur, 1.0/fs)  # Time vector
    sinusoid = np.sin(phase + 2*np.pi* freq * t)  # Sinusoid (mono signal)
    return sinusoid

def fade(monosignal, samples):
    '''Apply a raised cosine to the start and end of a mono signal.
    Keyword arguments:
    monosignal -- vector (1xn numpy array).
    samples -- number of samples of the fade (integer). Make sure that:
    2*samples < len(monosignal)
    Return:
    out -- faded monosignal (1xn numpy array)
    '''
    ramps = 0.5*(1-np.cos(2*np.pi*(np.arange(2*samples))/(2*samples-1)))
    fadein = ramps[0:samples]
    fadeout = ramps[samples:len(ramps)+1]
    plateu = np.ones(len(monosignal)-2*samples)
    weight = np.concatenate((fadein, plateu, fadeout))
    out = weight*monosignal
    return out

def set_gain(mono, gaindb):
    ''' Set gain of mono signal, to get dB(rms) to specified gaindb
    Keyword arguments:
    mono -- vector (numpy array).
    gaindb -- gain of mono in dB re max = 0 dB (float).
    Return:
    gained -- monosignal (numpy array)
    '''
    rms = np.sqrt(np.mean(mono**2))
    adjust = gaindb - 20 * np.log10(rms)
    gained = 10**(adjust/20.0) * mono  # don't forget to make 20 a float (20.0)
    # Print warning if overload, that is, if any abs(sample-value) > 1
    if (np.max(np.abs(gained)) > 1):
        message1 = "WARNING: set_gain() generated overloaded signal!"
        message2 = "max(abs(signal)) = " + str(np.max(np.abs(gained)))
        message3 = ("number of samples >1 = " +
                    str(np.sum(1 * (np.abs(gained) > 1))))
        print message1
        print message2
        print message3
    return gained

# Screen
win = visual.Window([800, 600], allowGUI = False,  # [1920, 1080]
    monitor = 'testMonitor', units = 'height', color = 'gray')

# ==============================================================================
# TONE ORDER AND RESPONSES ----------------------------------------------------
# 1 - 500 Hz
# 0 - 550 Hz
# 2 - 605 Hz
# 3 - 666 Hz
# 4 - 732 Hz
# 5 - 805 Hz
# 6 - 886 Hz
# 7 - 974 Hz
tone_order = np.random.choice([0,1,2,3,4,5,6,7], 20, replace = True)
targets = np.random.choice([1,0,0,0,0]*4, 20, replace = False)

# ==============================================================================
# CREATE SOUNDS ----------------------------------------------------------------
#sd.default.device = "ASIO Fireface USB"
print 'Sound device ------------------------------------------------------------'
print sd.query_devices()  # device = "ASIO Fireface USB")
print '-------------------------------------------------------------------------'

# Set the gain and sampling frequency (fs)
gain = -30
fs = 44100
frequencies = [500, 550, 605, 666, 732, 805, 886, 974]
tones = [0]*8
for t in range(len(frequencies)):
    tones[t] = set_gain(fade(create_sinusoid(
        freq = frequencies[t], phase = 0, fs = fs, dur = 0.1), 441), gain)  # 100 ms, 10 ms fade in/out
f_500 = np.transpose(np.array([tones[0],tones[0]]))  # deviant, control
f_550 = np.transpose(np.array([tones[1],tones[1]]))  # standard
f_605 = np.transpose(np.array([tones[2],tones[2]]))
f_666 = np.transpose(np.array([tones[3],tones[3]]))
f_732 = np.transpose(np.array([tones[4],tones[4]]))
f_805 = np.transpose(np.array([tones[5],tones[5]]))
f_886 = np.transpose(np.array([tones[6],tones[6]]))
f_974 = np.transpose(np.array([tones[7],tones[7]]))
all_tones = [f_500, f_550, f_605, f_666, f_732, f_805, f_886, f_974]

# ==============================================================================
# CREATE VISUALS ---------------------------------------------------------------
stimulus = visual.TextStim(
    win, color = 'white', height = 0.03, pos = (0, 0), text = '')

# ==============================================================================
# Make a text file to save data ------------------------------------------------
fileName = 'test'
dataFile = open(fileName + '.txt', 'w')
dataFile.write('soundCond\ttarget\ttrial_start\tresponse_time\n')

# ==============================================================================
# Keep track of time -----------------------------------------------------------
globalClock = core.Clock()
respClock = core.Clock()

# ==============================================================================
# Experimental procedure -------------------------------------------------------
# Trial loop
for t in range(len(tone_order)):
    l_trial_start = globalClock.getTime()
    # set stimuli according to condition
    if targets[t] == 0:
        stimulus.text = '+'
    else:
        stimulus.text = 'o'
    sd.play(all_tones[tone_order[t]], fs)  # Play sound for current trial
    core.wait(0.08)  # adjust visual to sound delay
    stimulus.draw()  # vertical bar
    win.flip()       # show cross and white
    core.wait(0.1)   # show cross 100 ms
    win.flip()       # turn visual stuff off
    core.wait(0.26)  # adjust ITI
    l_sound = tone_order[t]
    l_target = targets[t]
    key = event.getKeys(keyList = ['space'], timeStamped = globalClock)
    l_response_time = response_check(key)[0]
    # Save data to file
    # 'soundCond\ttarget\ttrial_start\tresponse_time\n'
    dataFile.write('%i\t%i\t%f\t%s\n' % (
        l_sound, l_target, l_trial_start, l_response_time))

dataFile.close()
Your second code example shows that you are using PsychoPy.
Why are you not using its audio capabilities?
Incidentally, the sounddevice module can be used as audio backend in PsychoPy and they are using an sd.OutputStream and a callback function internally, just as I suggested.
But if you use PsychoPy's audio functions, you don't really have to worry about that.
BTW, the PsychoPy community is really helpful, check out their forum: https://discourse.psychopy.org/.
Regarding this comment:
Our program is extremely simple
Playing audio with exact timing is never simple.
There are big platform-dependent differences and you should always measure if you want to make sure the timing is right.
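As a hedged sketch of the suggestion above to use PsychoPy's own audio with the sounddevice backend (the prefs key shown is from recent PsychoPy versions and must be set before importing sound):

from psychopy import prefs
prefs.hardware['audioLib'] = ['sounddevice']  # ask PsychoPy for the sounddevice backend
from psychopy import sound, core

beep = sound.Sound(440, secs=0.1)  # 440 Hz tone, 100 ms
beep.play()
core.wait(0.5)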

segmentation fault :pyaudio recording in no blocking mode

I've been trying to record audio using PyAudio until silence is detected in the input stream, but a segmentation fault happens while running it. I don't think anything is wrong with the PyAudio/PortAudio installed on my Raspberry Pi, because PyAudio runs the examples from its docs without any issue. I tried to debug it with pdb and gdb; these are the results:
Recording: Setting up
Thread 1 "python" received signal SIGSEGV, Segmentation fault.
0x7652a298 in ?? ()
from /usr/lib/python2.7/dist-packages/_portaudio.arm-linux- gnueabihf.so
(gdb) backtrace
#0 0x7652a298 in ?? ()
from /usr/lib/python2.7/dist-packages/_portaudio.arm-linux- gnueabihf.so
#1 0x764f47b0 in Pa_GetDeviceInfo ()
from /usr/lib/arm-linux-gnueabihf/libportaudio.so.2
#2 0x7effe2c4 in ?? ()
Backtrace stopped: previous frame identical to this frame (corrupt stack?)
(gdb)
pyaudio callback function
def _callback(self, in_data, frame_count, time_info, status):  # pylint: disable=unused-argument
    debug = logging.getLogger('alexapi').getEffectiveLevel() == logging.DEBUG
    if not in_data:
        self._queue.put(False)
        return None, pyaudio.paAbort
    do_VAD = True
    if self._callback_data['force_record'] and not self._callback_data['force_record'][1]:
        do_VAD = False
    # do not count first 10 frames when doing VAD
    if do_VAD and (self._callback_data['frames'] < self._callback_data['throwaway_frames']):
        self._callback_data['frames'] += 1
    # now do VAD
    elif (self._callback_data['force_record'] and self._callback_data['force_record'][0]()) \
            or (do_VAD and (self._callback_data['thresholdSilenceMet'] is False)
                and ((time.time() - self._callback_data['start']) < self.MAX_RECORDING_LENGTH)):
        if do_VAD:
            if int(len(in_data) / 2) == self.VAD_PERIOD:
                isSpeech = self._vad.is_speech(in_data, self.VAD_SAMPLERATE)
                if not isSpeech:
                    self._callback_data['silenceRun'] += 1
                else:
                    self._callback_data['silenceRun'] = 0
                    self._callback_data['numSilenceRuns'] += 1
            # only count silence runs after the first one
            # (allow user to speak for total of max recording length if they haven't said anything yet)
            if (self._callback_data['numSilenceRuns'] != 0) \
                    and ((self._callback_data['silenceRun'] * self.VAD_FRAME_MS) > self.VAD_SILENCE_TIMEOUT):
                self._callback_data['thresholdSilenceMet'] = True
    else:
        self._queue.put(False)
        return None, pyaudio.paComplete
    self._queue.put(in_data)
    if debug:
        self._callback_data['audio'] += in_data
    return None, pyaudio.paContinue
This is actually an adaptation of code that I found somewhere on the internet. I double-checked my device index and sample rate; there is nothing wrong with them.
Can someone help me sort it out?
The complete code is here.
pdb result
> /usr/lib/python2.7/dist-packages/pyaudio.py(438)__init__()
-> arguments['stream_callback'] = stream_callback
(Pdb) step
> /usr/lib/python2.7/dist-packages/pyaudio.py(441)__init__()
-> self._stream = pa.open(**arguments)
(Pdb) step
Segmentation fault
root@raspberrypi:/home/pi/Desktop# python -m pdb rp3test.py
I don't know, maybe it's just a bug in PortAudio that affects every library built on it, such as PyAudio and python-sounddevice, because I tried it with the sounddevice library as well. I finally made it work with this alsaaudio-based code:
def silence_listener(throwaway_frames, filename = "recording.wav"):
    # Reenable reading microphone raw data
    inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NORMAL, alsa_card)
    inp.setchannels(1)
    inp.setrate(VAD_SAMPLERATE)
    inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
    inp.setperiodsize(VAD_PERIOD)
    audio = ""

    # Buffer as long as we haven't heard enough silence or the total size is within max size
    thresholdSilenceMet = False
    frames = 0
    numSilenceRuns = 0
    silenceRun = 0
    start = time.time()

    # do not count first 10 frames when doing VAD
    while (frames < throwaway_frames):  # VAD_THROWAWAY_FRAMES):
        l, data = inp.read()
        frames = frames + 1
        if l:
            audio += data
            isSpeech = vad.is_speech(data, VAD_SAMPLERATE)

    # now do VAD
    while (thresholdSilenceMet == False) and ((time.time() - start) < MAX_RECORDING_LENGTH):
        l, data = inp.read()
        if l:
            audio += data
            if (l == VAD_PERIOD):
                isSpeech = vad.is_speech(data, VAD_SAMPLERATE)
                if (isSpeech == False):
                    silenceRun = silenceRun + 1
                    # print "0"
                else:
                    silenceRun = 0
                    numSilenceRuns = numSilenceRuns + 1
                    # print "1"
        # only count silence runs after the first one
        # (allow user to speak for total of max recording length if they haven't said anything yet)
        if (numSilenceRuns != 0) and ((silenceRun * VAD_FRAME_MS) > VAD_SILENCE_TIMEOUT):
            thresholdSilenceMet = True

    if debug: print("End recording")
    rf = open(filename, 'w')
    rf.write(audio)
    rf.close()
    inp.close()
    return
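For comparison, a rough sketch of the same record-until-silence idea with the sounddevice module and webrtcvad (this is not the asker's code; the frame size and sample rate are assumptions: 30 ms int16 frames at 16 kHz, which webrtcvad accepts):

import Queue
import sounddevice as sd
import webrtcvad

vad = webrtcvad.Vad(2)   # aggressiveness 0-3
q = Queue.Queue()

def callback(indata, frames, time, status):
    q.put(indata[:])     # keep the callback minimal; just copy the raw bytes out

# 480 samples of int16 at 16 kHz = one 30 ms VAD frame per callback
with sd.RawInputStream(samplerate=16000, blocksize=480, dtype='int16',
                       channels=1, callback=callback):
    silence_run = 0
    while silence_run * 30 < 1000:  # stop after roughly one second of silence
        frame = q.get()
        silence_run = 0 if vad.is_speech(frame, 16000) else silence_run + 1

Doing the VAD in the consumer loop rather than inside the PortAudio callback keeps the callback cheap, which is generally recommended for callback-based audio I/O.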

Python 2.x - sleep call at millisecond level on Windows

I was given some very good hints in this forum about how to code a clock object in Python 2. I've got some code working now. It's a clock that 'ticks' at 60 FPS:
import sys
import time


class Clock(object):

    def __init__(self):
        self.init_os()
        self.fps = 60.0
        self._tick = 1.0 / self.fps
        print "TICK", self._tick
        self.check_min_sleep()
        self.t = self.timestamp()

    def init_os(self):
        if sys.platform == "win32":
            self.timestamp = time.clock
            self.wait = time.sleep

    def timeit(self, f, args):
        t1 = self.timestamp()
        f(*args)
        t2 = self.timestamp()
        return t2 - t1

    def check_min_sleep(self):
        """checks the min sleep time on the system"""
        runs = 1000
        times = [self.timeit(self.wait, (0.001, )) for n in xrange(runs)]
        average = sum(times) / runs
        print "average min sleep time:", round(average, 6)
        sort = sorted(times)
        print "fastest, slowest", sort[0], sort[-1]

    def tick(self):
        next_tick = self.t + self._tick
        t = self.timestamp()
        while t < next_tick:
            t = self.timestamp()
        self.t = t


if __name__ == "__main__":
    clock = Clock()
The clock does not do too badly, but in order to avoid a busy loop I'd like Windows to sleep for less than the usual ~15 milliseconds. On my system (64-bit Windows 10), starting the clock reports an average minimum sleep time of about 15-16 ms when Python is the only application running. That's far too long a minimum sleep to avoid a busy loop.
Does anybody know how I can get Windows to sleep less than that value?
You can temporarily lower the timer period to the wPeriodMin value returned by timeGetDevCaps. The following defines a timer_resolution context manager that calls the timeBeginPeriod and timeEndPeriod functions.
import timeit
import contextlib
import ctypes
from ctypes import wintypes

winmm = ctypes.WinDLL('winmm')


class TIMECAPS(ctypes.Structure):
    _fields_ = (('wPeriodMin', wintypes.UINT),
                ('wPeriodMax', wintypes.UINT))


def _check_time_err(err, func, args):
    if err:
        raise WindowsError('%s error %d' % (func.__name__, err))
    return args


winmm.timeGetDevCaps.errcheck = _check_time_err
winmm.timeBeginPeriod.errcheck = _check_time_err
winmm.timeEndPeriod.errcheck = _check_time_err


@contextlib.contextmanager
def timer_resolution(msecs=0):
    caps = TIMECAPS()
    winmm.timeGetDevCaps(ctypes.byref(caps), ctypes.sizeof(caps))
    msecs = min(max(msecs, caps.wPeriodMin), caps.wPeriodMax)
    winmm.timeBeginPeriod(msecs)
    yield
    winmm.timeEndPeriod(msecs)


def min_sleep():
    setup = 'import time'
    stmt = 'time.sleep(0.001)'
    return timeit.timeit(stmt, setup, number=1000)
Example
>>> min_sleep()
15.6137827
>>> with timer_resolution(msecs=1): min_sleep()
...
1.2827173000000016
The original timer resolution is restored after the with block:
>>> min_sleep()
15.6229814
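A hedged sketch of how the two pieces could be combined (integrating the answer's context manager with the question's Clock class is hypothetical):

if __name__ == "__main__":
    # inside the block, time.sleep(0.001) really sleeps about 1 ms, so
    # check_min_sleep() should report a much smaller average
    with timer_resolution(msecs=1):
        clock = Clock()
        for _ in xrange(120):  # run two seconds' worth of 60 FPS ticks
            clock.tick()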

How do I perform time-based audio with Pygame?

This is my first question on StackOverflow, so here goes:
Edit: I have edited this a few times, just fixing typing mistakes and updating the code. Even after various changes to the code, the issue remains exactly the same.
Also, pygame.mixer.music.fadeout() is not what I'm looking for. This code will also be used when I want to lower the music volume to perhaps 50% on, say, pausing the game or entering a talk scene.
With Pygame, I am trying to perform music volume manipulation based on how much time has passed. I already have some decent code created, but it's not performing how I thought it intuitively should. Also, I should note that I am using the component-based EBS system I ripped from PySDL2. Here is the link to the EBS module: https://bitbucket.org/marcusva/py-sdl2/src/02a4bc4f79d9440fe98e372e0ffaadacaefaa5c6/sdl2/ext/ebs.py?at=default
This is my initial block of code:
import os
import sys

import pygame
from pygame.locals import *

# Setup import paths for module.
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
sys.path.insert(0, parent_dir)
sys.path.insert(0, os.path.join(parent_dir, "Game"))

import Game
from Porting.sdl2.ext import ebs

pygame.display.quit()
print("Counting down...")
for n in range(5):
    print(str(n + 1))
    pygame.time.delay(1000)

appworld = ebs.World()
audio_system = Game.audio.AudioSystem(44100, -16, 2, 4096)
appworld.add_system(audio_system)
test1 = Game.sprites.AudioSprite(appworld)
test2 = Game.sprites.AudioSprite(appworld)
test1.audio = Game.audio.Audio(database["BGMusic0"], True)
test2.audio = Game.audio.Audio(database["BGMusic1"], True)
game_clock = pygame.time.Clock()
volume_change_clock = pygame.time.Clock()
loop = True
time_passed = 0

while loop:
    game_clock.tick(60)
    appworld.process()
    time_passed += volume_change_clock.tick(60)
    if time_passed > (10 * 1000):
        print(time_passed)
        if not audio_system.music_volume_changed:
            audio_system.set_music_volume(0, True)
My next block of code:
import pygame
from Porting.sdl2.ext import ebs


class AudioSystem(ebs.System):

    def __init__(self, frequency, bit_size, channels, buffer):
        super(AudioSystem, self).__init__()
        self.componenttypes = Audio,
        pygame.mixer.init(frequency, bit_size, channels, buffer)
        pygame.mixer.set_num_channels(200)
        self.frequency = frequency
        self.bit_size = bit_size
        self.channels = channels
        self.buffer = buffer
        self.music_volume_change_clock = None
        self.music_volume_changed = False
        self.music_volume_current = 0
        self.music_volume_new = 0
        self.music_fade = False
        self.music_change_speed = 0
        self.time_passed_total = 0
        self.time_passed_remainder = 0

    def process(self, world, componentsets):
        for audio in componentsets:
            if audio.is_music:
                music = pygame.mixer.music
                if not pygame.mixer.music.get_busy():
                    music.load(audio.file)
                    music.play()
                if self.music_volume_changed:
                    self.music_volume_current = music.get_volume() * 100
                    if self.music_volume_current != self.music_volume_new and self.music_fade:
                        time_passed = self.music_volume_change_clock.tick(60)
                        self.time_passed_total += time_passed
                        self.time_passed_total += self.time_passed_remainder
                        self.time_passed_remainder = 0
                        if self.time_passed_total > self.music_change_speed:
                            self.time_passed_remainder = self.time_passed_total % self.music_change_speed
                            volume_change_amount = int(self.time_passed_total / self.music_change_speed)
                            self.time_passed_total = 0
                            if self.music_volume_current > self.music_volume_new:
                                self.music_volume_current -= volume_change_amount
                                music.set_volume(self.music_volume_current / 100)
                            elif self.music_volume_current < self.music_volume_new:
                                self.music_volume_current += volume_change_amount
                                music.set_volume(self.music_volume_current / 100)
                    elif self.music_volume_current != self.music_volume_new:
                        music.set_volume(self.music_volume_current / 100)
                    else:
                        self.music_volume_changed = False
                        self.music_fade = False
            else:
                if not audio.channel:
                    audio.channel = pygame.mixer.find_channel()
                    audio.channel.play(audio.file)

    def set_music_volume(self, percent, fade = False, change_speed = 50):
        self.music_volume_changed = True
        self.music_volume_new = percent
        self.music_fade = fade
        self.music_change_speed = change_speed
        self.music_volume_change_clock = pygame.time.Clock()


class Audio(object):

    def __init__(self, file, is_music = False):
        self.is_music = is_music
        if self.is_music:
            self.file = file
        else:
            self.channel = None
            self.file = pygame.mixer.Sound(file)
My testing has shown that manipulating the parameter of Clock.tick() in my Game.audio module influences how quickly the audio volume falls from 100 to 0. Leaving it blank causes it to drop almost instantaneously. At 60, it falls to 0 in around 2 seconds, which baffles me. At 30, in 1 second. At 5, it falls slowly, with the volume never quite seeming to reach 0. I want to completely decouple my audio volume manipulation from my game's frame rate, but I am unsure how to accomplish that. I want to avoid threading and multiprocessing if possible.
Thanks in advance! :)
Clock.tick()'s parameter is used to call the SDL sleep function to limit how many times the loop runs per second.
Calling it with Clock.tick(5) limits it to five loops per second.
I've also never used two clocks in the same code, especially with the multiple ticks (all of which will calculate their sleep time individually). Instead of that, consider using the return value of tick (the time in ms since the last call), and use that to track time through the whole application.
Example:
timer = 0
# ... do things ...
timer += main_clock.tick(FPS)
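As a hedged, self-contained sketch of that idea (the file name is a placeholder), fading the music volume over a fixed wall-clock duration regardless of the frame rate:

import pygame

pygame.mixer.init()
pygame.mixer.music.load('music.ogg')  # placeholder file name
pygame.mixer.music.play(-1)

clock = pygame.time.Clock()
fade_ms = 2000.0  # fade from full volume to silence over 2 seconds
elapsed = 0.0

while elapsed < fade_ms:
    elapsed += clock.tick(60)  # milliseconds since the last tick, at any FPS
    pygame.mixer.music.set_volume(max(0.0, 1.0 - elapsed / fade_ms))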
