-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathrun-experiment.py
More file actions
191 lines (169 loc) · 7.84 KB
/
run-experiment.py
File metadata and controls
191 lines (169 loc) · 7.84 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
===============================================================================
Script 'run-experiment.py'
===============================================================================
This script plays stimuli and records verbal responses.
"""
# @author: drmccloy
# Created on Thu Jun 15 13:38:22 2017
# License: BSD (3-clause)
import sys
import yaml
import queue
import os.path as op
from os import makedirs
import numpy as np
import pandas as pd
import sounddevice as sd
import soundfile as sf
from glob import glob
from expyfun import ExperimentController, get_keyboard_input
from expyfun.stimuli import read_wav
# load external parameter file
paramfile = 'params.yaml'
with open(paramfile, 'r') as pf:
    # safe_load: params.yaml is plain data; plain yaml.load without a Loader
    # is deprecated (PyYAML >= 5.1) and can execute arbitrary constructors
    params = yaml.safe_load(pf)
block_len = params['block_len']
sentences = params['sentences']
# number of blocks needed to cover all sentences (last block may be short)
n_blocks = int(np.ceil(len(sentences) / block_len))
# load design matrices
design_matrix = pd.read_csv('design-matrix.csv', header=0)
training_stimuli = pd.read_csv('training-list.csv', header=0)
n_training_stims = training_stimuli.shape[0]
# training stimuli get negative trial numbers (-n, ..., -1) so they sort
# before (and are distinguishable from) the real trials (0, 1, 2, ...)
training_stimuli['trial'] = np.arange(n_training_stims) - n_training_stims
# parameters shared by the input stream and the WAV file we write out
samplerate = 44100
channels = 1
# Input side: the external A/D box (M-Audio FastTrack Ultra 8R) can *only*
# deliver 24-bit audio; asking for 'int32' makes the backend zero-pad each
# sample to 32 bits automatically. The microphone signal arrives on hardware
# channel 5, so map it in via the host-API "extra settings".
sd.default.samplerate = samplerate
sd.default.channels = channels
sd.default.dtype = 'int32'
ch_5 = sd.CoreAudioSettings(channel_map=[4])  # zero-indexed
# Output side: samples arrive as 24-bit ints already padded to 32-bit during
# acquisition, so writing the WAV as 32-bit ints avoids a dtype conversion.
soundfile_args = {'mode': 'x', 'samplerate': samplerate,
                  'channels': channels, 'format': 'WAV',
                  'subtype': 'PCM_32'}
# experiment setup
stim_dir = 'stimuli'
train_dir = op.join(stim_dir, 'training')
live_keys = ['space']
# arguments for the ExperimentController constructor
ec_params = {
    'exp_name': 'gend-intel',
    'audio_controller': 'pyglet',
    'response_device': 'keyboard',
    'stim_fs': 44100,
    'stim_rms': 0.01,
    'check_rms': None,
    'output_dir': 'logs',
    'force_quit': ['q'],
    'full_screen': False,
    'window_size': (1024, 768),
    'version': 'dev',
}
# experimenter-facing messages; {0}/{1} slots take the color codes below
_first_trial_msg = ('Start at which trial? (leave blank and push ENTER to '
                    'start at beginning): ')
_welcome_msg = ('Press "{0}{3}{1}" any time this window is visible to quit '
                'the experiment.\n\nListener responses will start recording'
                ' automatically after each stimulus; press "{0}Ctrl+C{1}" '
                'to stop recording and advance to the next trial (for '
                'recording to work, this window must disappear during the '
                'response so the terminal can catch the Ctrl+C keystroke). '
                '\n\nTalk to the subject, then press "{0}{2}{1}" when they '
                'are ready to begin the {4} block.')
_end_training_msg = ('End of training.\n\nTalk to the subject, then press '
                     '"{0}{2}{1}" when they are ready to start the first '
                     'real trial.')
_end_block_msg = ('Finished block {} of {}.\n\nTalk to the subject and '
                  'press "{}" when they are ready to continue.')
_now_playing_msg = ('now playing:\n\n{2} {0}{3}{1}\n{4} {0}{5}{1}\n{6} '
                    '{0}{7}{1}\n{8} {0}{9}{1}')
_finished_msg = 'Finished!\n\nPress "{0}{2}{1}" to close.'
msg = {
    'first_trial': _first_trial_msg,
    'welcome': _welcome_msg,
    'end_training': _end_training_msg,
    'end_block': _end_block_msg,
    'now_playing': _now_playing_msg,
    'finished': _finished_msg,
}
# common formatting strings (expyfun screen-text color attributes)
white = '{color (255, 255, 255, 255)}'
green = '{color (51, 255, 153, 255)}'
# start experiment controller
with ExperimentController(**ec_params) as ec:
    # hide the experimenter window while we set up
    ec.set_visible(False)
    # create the output directory for recorded responses
    resp_dir = op.join('responses', ec.participant)
    makedirs(resp_dir, exist_ok=True)
    # load stimulus list for this listener; the session number selects this
    # listener's rows of the design matrix
    stimuli = design_matrix.loc[design_matrix['listener'] == int(ec.session),
                                ['filename']]
    n_stim = stimuli.shape[0]
    stimuli['trial'] = np.arange(n_stim)
    # sanity check: each listener's design should yield exactly 180 stimuli
    if n_stim != 180:
        raise RuntimeError('{} stimuli loaded (should be 180)'.format(n_stim))
    # get starting trial number (lets an interrupted session be resumed)
    first_trial = get_keyboard_input(msg['first_trial'], default=0,
                                     out_type=int, valid=range(n_stim))
    stimuli = stimuli.loc[stimuli['trial'] >= first_trial, :]
    # run training?
    run_training = get_keyboard_input('Run training [Y/n]?', default='y',
                                      out_type=str, valid=['y', 'Y', 'n', 'N'])
    run_training = run_training in ['y', 'Y']
    if run_training:  # prepend training stimuli (negative trial numbers)
        stimuli = pd.concat([training_stimuli, stimuli], ignore_index=True)
    # convert dataframe index to be the trial number
    stimuli.set_index(['trial'], inplace=True)
    # experimenter instructions
    ec.set_visible(True)
    fmt = [green, white, live_keys[0], ec._response_handler.force_quit_keys[0],
           ['experiment', 'training'][int(run_training)]]
    prompt = msg['welcome'].format(*fmt)
    ec.screen_prompt(prompt, live_keys=live_keys, font_size=18, attr=True)
    # put a reminder in the terminal window
    print('Press Ctrl+C when listener has finished responding.')
    # loop over trials: index is the trial number, value is the WAV filename
    for ix, stim in stimuli.itertuples():
        # are we done with training?
        if run_training and ix == first_trial:
            fmt = [green, white, live_keys[0]]
            ec.screen_prompt(msg['end_training'].format(*fmt))
        # break between blocks
        trial_num = ix - first_trial
        if trial_num > 0 and trial_num % block_len == 0:
            block_num = trial_num // block_len
            fmt = [block_num, n_blocks, live_keys[0]]
            ec.screen_prompt(msg['end_block'].format(*fmt))
        # load the wav file; training stimuli live in a subfolder
        # ('stim_folder' instead of 'dir', which shadowed the builtin)
        stim_folder = train_dir if ix < 0 else stim_dir
        wav, fs = read_wav(op.join(stim_folder, stim))
        dur = wav.shape[-1] / fs
        ec.load_buffer(wav)
        # identify trial and save to logfile
        # NOTE(review): fixed-width filename slices — chars 0-5 talker,
        # 7-11 sentence, 13-14 SNR; assumes the stimulus naming scheme is
        # exactly that wide — TODO confirm against the stimulus files
        talker, sentence, snr = stim[:6], stim[7:12], stim[13:15]
        trial_id_parts = ['trial:', str(ix), 'talker:', talker,
                          'sentence:', sentence, 'SNR:', snr]
        ec.identify_trial(ec_id=' '.join(trial_id_parts), ttl_id=[])
        # show current stim info and play stimulus
        fmt = [green, white] + trial_id_parts
        if ix < 0:
            fmt[2] = 'training:'  # relabel negative-numbered trials
        ec.screen_text(msg['now_playing'].format(*fmt))
        ec.start_stimulus()
        # wait a little less than stim duration, to make sure buffer is open
        # when listener starts responding
        ec.wait_secs(dur - 0.5)
        # save the listener response; window is hidden so the terminal can
        # catch the experimenter's Ctrl+C that ends the recording
        ec.set_visible(False)
        tn = 'training' if ix < 0 else '{:03}'.format(ix)
        resp_file = op.join(resp_dir, '{}_{}.wav'.format(tn, sentence))
        try:
            # queue shuttles audio chunks from the PortAudio callback thread
            # to the file-writing loop below
            q = queue.Queue()

            def sd_callback(data_in, frames, time, status):
                # runs on the audio thread: report over/underflows to stderr
                # and hand off a copy of the incoming buffer
                if status:
                    print(status, file=sys.stderr)
                q.put(data_in.copy())

            with sf.SoundFile(resp_file, **soundfile_args) as sfile, \
                    sd.InputStream(callback=sd_callback, extra_settings=ch_5):
                # record until the experimenter presses Ctrl+C
                while True:
                    sfile.write(q.get())
        except KeyboardInterrupt:
            pass  # deliberate: Ctrl+C is the "stop recording" signal
        # finalize trial and restore experimenter interface
        ec.trial_ok()
        ec.set_visible(True)
    # end experiment
    ec.screen_prompt(msg['finished'].format(green, white, live_keys[0]),
                     max_wait=10, live_keys=live_keys)