Coverage for ibllib/qc/task_metrics.py: 97%

409 statements  

coverage.py v7.8.0, created at 2025-05-07 14:26 +0100

1"""Behaviour QC. 

2 

3This module runs a list of quality control metrics on the behaviour data. 

4 

5.. warning:: 

6 The QC should be loaded using :meth:`ibllib.pipes.base_tasks.BehaviourTask.run_qc` and not 

7 instantiated directly. 

8 

9Examples 

10-------- 

11Running on a behaviour rig computer and updating QC fields in Alyx: 

12 

13>>> from ibllib.qc.task_qc_viewer.task_qc import show_session_task_qc 

14>>> qc = show_session_task_qc(session_path, bpod_only=True, local=True) # must close Viewer window 

15>>> qc = qc.run(update=True) 

16 

17Downloading the required data and inspecting the QC on a different computer: 

18 

19>>> from ibllib.pipes.dynamic_pipeline import get_trials_tasks 

20>>> from one.api import ONE 

21>>> task = get_trials_tasks(session_path, one=ONE())[0] # get first task run 

22>>> task.location = 'remote' 

23>>> task.setUp() # download required data 

24>>> qc = task.run_qc(update=False) 

25>>> outcome, results = qc.run() 

26 

27Inspecting individual test outcomes: 

28 

29>>> outcome, results, outcomes = qc.compute_session_status() 

30 

31Running Bpod QC on an ephys session (when not on the behaviour rig PC): 

32 

33>>> from ibllib.qc.task_qc_viewer.task_qc import get_bpod_trials_task, get_trials_tasks 

34>>> from one.api import ONE 

35>>> tasks = get_trials_tasks(session_path, one=ONE()) 

36>>> task = get_bpod_trials_task(tasks[0]) # Ensure Bpod only on behaviour rig 

37>>> task.location = 'remote' 

38>>> task.setUp() # download required data 

39>>> qc = task.run_qc(update=False) 

40>>> outcome, results = qc.run() 

41 

42Running ephys QC from the local server PC (after the ephys and Bpod data have been copied to the same folder): 

43 

44>>> from ibllib.pipes.dynamic_pipeline import get_trials_tasks 

45>>> task = get_trials_tasks(session_path, one=ONE())[0] # get first task run 

46>>> qc = task.run_qc(update=False) 

47>>> outcome, results = qc.run() 

48""" 

49import logging 

50import sys 

51from packaging import version 

52from pathlib import Path, PurePosixPath 

53from datetime import datetime, timedelta 

54from inspect import getmembers, isfunction 

55from functools import reduce 

56from collections.abc import Sized 

57 

58import numpy as np 

59from scipy.stats import chisquare 

60 

61from brainbox.behavior.wheel import cm_to_rad, traces_by_trial 

62from ibllib.io.extractors import ephys_fpga 

63from one.alf import spec 

64from . import base 

65 

66_log = logging.getLogger(__name__) 

67 

68# TODO: the following two parameters should be read from the task parameters for each session 

69ITI_DELAY_SECS = .5 

70FEEDBACK_NOGO_DELAY_SECS = 2 

71 

72BWM_CRITERIA = { 

73 'default': {'PASS': 0.99, 'WARNING': 0.90, 'FAIL': 0}, # Note: WARNING was 0.95 prior to Aug 2022 

74 '_task_stimOff_itiIn_delays': {'PASS': 0.99, 'WARNING': 0}, 

75 '_task_positive_feedback_stimOff_delays': {'PASS': 0.99, 'WARNING': 0}, 

76 '_task_negative_feedback_stimOff_delays': {'PASS': 0.99, 'WARNING': 0}, 

77 '_task_wheel_move_during_closed_loop': {'PASS': 0.99, 'WARNING': 0}, 

78 '_task_response_stimFreeze_delays': {'PASS': 0.99, 'WARNING': 0}, 

79 '_task_detected_wheel_moves': {'PASS': 0.99, 'WARNING': 0}, 

80 '_task_trial_length': {'PASS': 0.99, 'WARNING': 0}, 

81 '_task_goCue_delays': {'PASS': 0.99, 'WARNING': 0}, 

82 '_task_errorCue_delays': {'PASS': 0.99, 'WARNING': 0}, 

83 '_task_stimOn_delays': {'PASS': 0.99, 'WARNING': 0}, 

84 '_task_stimOff_delays': {'PASS': 0.99, 'WARNING': 0}, 

85 '_task_stimFreeze_delays': {'PASS': 0.99, 'WARNING': 0}, 

86 '_task_iti_delays': {'NOT_SET': 0}, 

87 '_task_passed_trial_checks': {'NOT_SET': 0} 

88} 

89 
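# Illustrative doctest-style sketch of how these thresholds are applied: each check's
# passing proportion is compared against its criteria map via TaskQC.thresholding
# (defined further down in this module); the proportions below are hypothetical.
# >>> TaskQC.thresholding(0.992, thresholds=BWM_CRITERIA['default']).name
# 'PASS'
# >>> TaskQC.thresholding(0.95, thresholds=BWM_CRITERIA['default']).name
# 'WARNING'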

90 

91def compute_session_status_from_dict(results, criteria=None): 

92 """ 

93 Compute overall task QC value from QC check results. 

94 

95 Given a dictionary of results, computes the overall session QC for each key and aggregates 

96 them into a single value. 

97 

98 Parameters 

99 ---------- 

100 results : dict 

101 A dictionary of QC keys containing (usually scalar) values. 

102 criteria : dict 

103 A dictionary of QC keys containing a map of PASS, WARNING and FAIL thresholds. 

104 

105 Returns 

106 ------- 

107 one.alf.spec.QC 

108 Overall session QC outcome. 

109 dict 

110 A map of QC tests and their outcomes. 

111 """ 

112 if not criteria: 1axmlngcbdei

113 criteria = {'default': BWM_CRITERIA['default']} 

114 outcomes = {k: TaskQC.thresholding(v, thresholds=criteria.get(k, criteria['default'])) 1axmlngcbdei

115 for k, v in results.items()} 

116 

117 # Criteria map is in order of severity so the max index is our overall QC outcome 

118 session_outcome = base.QC.overall_outcome(outcomes.values()) 1amlngcbdei

119 return session_outcome, outcomes 1amlngcbdei

120 
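# Illustrative sketch of aggregating a results dict with the full criteria map; the check
# values below are hypothetical.
# >>> results = {'_task_goCue_delays': 0.995, '_task_stimOn_delays': 0.93}
# >>> outcome, outcomes = compute_session_status_from_dict(results, BWM_CRITERIA)
# >>> outcome.name, outcomes['_task_stimOn_delays'].name
# ('WARNING', 'WARNING')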

121 

122def update_dataset_qc(qc, registered_datasets, one, override=False): 

123 """ 

124 Update QC values for individual datasets. 

125 

126 Parameters 

127 ---------- 

128 qc : ibllib.qc.task_metrics.TaskQC 

129 A TaskQC object that has been run. 

130 registered_datasets : list of dict 

131 A list of Alyx dataset records. 

132 one : one.api.OneAlyx 

133 An online instance of ONE. 

134 override : bool 

135 If True, the QC field is updated even if the new value is worse than the previous one. 

136 

137 Returns 

138 ------- 

139 list of dict 

140 The list of registered datasets but with the 'qc' fields updated. 

141 """ 

142 # Create map of dataset name, sans extension, to dataset id 

143 stem2id = {PurePosixPath(dset['name']).stem: dset.get('id') for dset in registered_datasets} 1hc

144 # Ensure dataset stems are unique 

145 assert len(stem2id) == len(registered_datasets), 'ambiguous dataset names' 1hc

146 

147 # dict of QC check to outcome (as enum value) 

148 *_, outcomes = qc.compute_session_status() 1hc

149 # work over map of dataset name (sans extension) to outcome (enum or dict of columns: enum) 

150 for name, outcome in qc.compute_dataset_qc_status(outcomes).items(): 1hc

151 # if outcome is a dict, calculate aggregate outcome for each column 

152 if isinstance(outcome, dict): 1hc

153 extended_qc = outcome 1hc

154 outcome = qc.overall_outcome(outcome.values()) 1hc

155 else: 

156 extended_qc = {} 1hc

157 # check if dataset was registered to Alyx 

158 if not (did := stem2id.get(name)): 1hc

159 _log.debug('dataset %s not registered, skipping', name) 1h

160 continue 1h

161 # update the dataset QC value on Alyx 

162 if outcome > spec.QC.NOT_SET or override: 1hc

163 dset_qc = base.QC(did, one=one, log=_log, endpoint='datasets') 1hc

164 dset = next(x for x in registered_datasets if did == x.get('id')) 1hc

165 dset['qc'] = dset_qc.update(outcome, namespace='', override=override).name 1hc

166 if extended_qc: 1hc

167 dset_qc.update_extended_qc(extended_qc) 1hc

168 return registered_datasets 1hc

169 
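# Minimal usage sketch, assuming `qc` is a TaskQC that has already been run, `one` is an
# online ONE instance and `datasets` is a list of Alyx dataset records from registration
# (all three assumed to exist):
# >>> datasets = update_dataset_qc(qc, datasets, one)
# >>> [d['qc'] for d in datasets]  # dataset-level QC fields now reflect the task QC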

170 

171class TaskQC(base.QC): 

172 """Task QC for training, biased, and ephys choice world.""" 

173 

174 criteria = BWM_CRITERIA 

175 

176 extractor = None 

177 """ibllib.qc.task_extractors.TaskQCExtractor: A task extractor object containing raw and extracted data.""" 

178 

179 @staticmethod 

180 def thresholding(qc_value, thresholds=None) -> spec.QC: 

181 """ 

182 Compute the outcome of a single key by applying thresholding. 

183 

184 Parameters 

185 ---------- 

186 qc_value : float 

187 Proportion of passing qcs, between 0 and 1. 

188 thresholds : dict 

189 Dictionary with keys 'PASS', 'WARNING', 'FAIL' (or enum 

190 integers, cf. one.alf.spec.QC). 

191 

192 Returns 

193 ------- 

194 one.alf.spec.QC 

195 The outcome. 

196 """ 

197 thresholds = {spec.QC.validate(k): v for k, v in thresholds.items() or {}} 1axmlngcbdei

198 MAX_BOUND, MIN_BOUND = (1, 0) 1axmlngcbdei

199 if qc_value is None or np.isnan(qc_value): 1axmlngcbdei

200 return spec.QC.NOT_SET 1amlngbei

201 elif (qc_value > MAX_BOUND) or (qc_value < MIN_BOUND): 1axmlngcbdei

202 raise ValueError('Values out of bound') 1x

203 for crit in filter(None, sorted(spec.QC)): 1amlngcbdei

204 if crit in thresholds.keys() and qc_value >= thresholds[crit]: 1amlngcbdei

205 return crit 1amlngcbdei

206 # if none of these apply, return 'NOT_SET' 

207 return spec.QC.NOT_SET 1algcbdei

208 

209 def __init__(self, session_path_or_eid, **kwargs): 

210 """ 

211 Task QC for training, biased, and ephys choice world. 

212 

213 :param session_path_or_eid: A session eid or path 

214 :param log: A logging.Logger instance, if None the 'ibllib' logger is used 

215 :param one: An ONE instance for fetching and setting the QC on Alyx 

216 """ 

217 # When an eid is provided, we will download the required data by default (if necessary) 

218 self.download_data = not spec.is_session_path(Path(session_path_or_eid)) 1ahcbd

219 super().__init__(session_path_or_eid, **kwargs) 1ahcbd

220 

221 # Data 

222 self.extractor = None 1ahcbd

223 

224 # Metrics and passed trials 

225 self.metrics = None 1ahcbd

226 self.passed = None 1ahcbd

227 

228 # Criteria (initialize as outcomes vary by class, task, and hardware) 

229 self.criteria = BWM_CRITERIA.copy() 1ahcbd

230 

231 def compute(self, **kwargs): 

232 """Compute and store the QC metrics. 

233 

234 Runs the QC on the session and stores a map of the metrics for each datapoint for each 

235 test, and a map of which datapoints passed for each test. 

236 

237 Parameters 

238 ---------- 

239 bpod_only : bool 

240 If True no data is extracted from the FPGA for ephys sessions. 

241 """ 

242 assert self.extractor is not None 1acbdfe

243 

244 ver = self.extractor.settings.get('IBLRIG_VERSION', '') or '0.0.0' 1acbdfe

245 if version.parse(ver) >= version.parse('8.0.0'): 1acbdfe

246 self.criteria['_task_iti_delays'] = {'PASS': 0.99, 'WARNING': 0} 

247 self.criteria['_task_passed_trial_checks'] = {'PASS': 0.7, 'WARNING': 0} 

248 else: 

249 self.criteria['_task_iti_delays'] = {'NOT_SET': 0} 1acbdfe

250 self.criteria['_task_passed_trial_checks'] = {'NOT_SET': 0} 1acbdfe

251 

252 self.log.info(f'Session {self.session_path}: Running QC on behavior data...') 1acbdfe

253 self.get_bpodqc_metrics_frame( 1acbdfe

254 self.extractor.data, 

255 wheel_gain=self.extractor.settings['STIM_GAIN'], # The wheel gain 

256 photodiode=self.extractor.frame_ttls, 

257 audio=self.extractor.audio_ttls, 

258 re_encoding=self.extractor.wheel_encoding or 'X1', 

259 min_qt=self.extractor.settings.get('QUIESCENT_PERIOD') or 0.2, 

260 audio_output=self.extractor.settings.get('device_sound', {}).get('OUTPUT', 'unknown') 

261 ) 

262 

263 def _get_checks(self): 

264 """ 

265 Find all methods that begin with 'check_'. 

266 

267 Returns 

268 ------- 

269 Dict[str, function] 

270 A map of QC check function names and the corresponding functions that return `metric` 

271 (any), `passed` (bool). 

272 """ 

273 def is_metric(x): 1athcbdfe

274 return isfunction(x) and x.__name__.startswith('check_') 1athcbdfe

275 

276 return dict(getmembers(sys.modules[__name__], is_metric)) 1athcbdfe

277 

278 def get_bpodqc_metrics_frame(self, data, **kwargs): 

279 """ 

280 Evaluate task QC metrics. 

281 

282 Evaluates all the QC metric functions in this module (those starting with 'check') and 

283 returns the results. The optional kwargs listed below are passed to each QC metric function. 

284 

285 Parameters 

286 ---------- 

287 data : dict 

288 The extracted task data. 

289 re_encoding : str {'X1', 'X2', 'X4'} 

290 The encoding configuration of the rotary encoder. 

291 enc_res : int 

292 The rotary encoder resolution as number of fronts per revolution. 

293 wheel_gain : float 

294 The STIM_GAIN task parameter. 

295 photodiode : dict 

296 The fronts from Bpod's BNC1 input or FPGA frame2ttl channel. 

297 audio : dict 

298 The fronts from Bpod's BNC2 input or FPGA audio sync channel. 

299 min_qt : float 

300 The QUIESCENT_PERIOD task parameter. 

301 

302 Returns 

303 ------- 

304 dict 

305 Map of checks and their QC metric values (1 per trial). 

306 dict 

307 Map of checks and a float array of which samples passed. 

308 """ 

309 # Find all methods that begin with 'check_' 

310 checks = self._get_checks() 1acbdfe

311 prefix = '_task_' # Extended QC fields will start with this 1acbdfe

312 # Method 'check_foobar' stored with key '_task_foobar' in metrics map 

313 qc_metrics_map = {prefix + k[6:]: fn(data, **kwargs) for k, fn in checks.items()} 1acbdfe

314 

315 # Split metrics and passed frames 

316 self.metrics = {} 1acbdfe

317 self.passed = {} 1acbdfe

318 for k in qc_metrics_map: 1acbdfe

319 self.metrics[k], self.passed[k] = qc_metrics_map[k] 1acbdfe

320 

321 # Add a check for trial level pass: did a given trial pass all checks? 

322 n_trials = data['intervals'].shape[0] 1acbdfe

323 # Trial-level checks return an array whose length equals the number of trials 

324 trial_level_passed = [m for m in self.passed.values() if isinstance(m, Sized) and len(m) == n_trials] 1acbdfe

325 name = prefix + 'passed_trial_checks' 1acbdfe

326 self.metrics[name] = reduce(np.logical_and, trial_level_passed or (None, None)) 1acbdfe

327 self.passed[name] = self.metrics[name].astype(float) if trial_level_passed else None 1acbdfe

328 

329 def run(self, update=False, namespace='task', **kwargs): 

330 """ 

331 Compute the QC outcomes and return overall task QC outcome. 

332 

333 Parameters 

334 ---------- 

335 update : bool 

336 If True, updates the session QC fields on Alyx. 

337 namespace : str 

338 The namespace of the QC fields in the Alyx JSON field. 

339 bpod_only : bool 

340 If True no data is extracted from the FPGA for ephys sessions. 

341 

342 Returns 

343 ------- 

344 str 

345 Overall task QC outcome. 

346 dict 

347 A map of QC tests and the proportion of data points that passed them. 

348 """ 

349 if self.metrics is None: 1acbdi

350 self.compute(**kwargs) 1acbd

351 outcome, results, _ = self.compute_session_status() 1acbdi

352 if update: 1acbdi

353 self.update_extended_qc(results) 1ci

354 self.update(outcome, namespace) 1ci

355 return outcome, results 1acbdi

356 

357 def compute_session_status(self): 

358 """ 

359 Compute the overall session QC for each key and aggregates in a single value. 

360 

361 Returns 

362 ------- 

363 str 

364 Overall session QC outcome. 

365 dict 

366 A map of QC tests and the proportion of data points that passed them. 

367 dict 

368 A map of QC tests and their outcomes. 

369 """ 

370 if self.passed is None: 1agcbdei

371 raise AttributeError('passed is None; compute QC first') 1e

372 # Get mean passed of each check, or None if passed is None or all NaN 

373 results = {k: None if v is None or np.isnan(v).all() else np.nanmean(v) 1agcbdei

374 for k, v in self.passed.items()} 

375 session_outcome, outcomes = compute_session_status_from_dict(results, self.criteria) 1agcbdei

376 return session_outcome, results, outcomes 1agcbdei

377 

378 @staticmethod 

379 def compute_dataset_qc_status(outcomes): 

380 """Return map of dataset specific QC values. 

381 

382 Parameters 

383 ---------- 

384 outcomes : dict 

385 Map of checks and their individual outcomes. 

386 

387 Returns 

388 ------- 

389 dict 

390 Map of dataset names and their outcome. 

391 """ 

392 trials_table_outcomes = { 1thc

393 'intervals': outcomes.get('_task_iti_delays', spec.QC.NOT_SET), 

394 'goCue_times': outcomes.get('_task_goCue_delays', spec.QC.NOT_SET), 

395 'response_times': spec.QC.NOT_SET, 'choice': spec.QC.NOT_SET, 

396 'stimOn_times': outcomes.get('_task_stimOn_delays', spec.QC.NOT_SET), 

397 'contrastLeft': spec.QC.NOT_SET, 'contrastRight': spec.QC.NOT_SET, 

398 'feedbackType': spec.QC.NOT_SET, 'probabilityLeft': spec.QC.NOT_SET, 

399 'feedback_times': outcomes.get('_task_errorCue_delays', spec.QC.NOT_SET), 

400 'firstMovement_times': spec.QC.NOT_SET 

401 } 

402 reward_checks = ('_task_reward_volumes', '_task_reward_volume_set') 1thc

403 trials_table_outcomes['rewardVolume'] = TaskQC.overall_outcome( 1thc

404 (outcomes.get(x, spec.QC.NOT_SET) for x in reward_checks) 

405 ) 

406 dataset_outcomes = { 1thc

407 '_ibl_trials.stimOff_times': outcomes.get('_task_stimOff_delays', spec.QC.NOT_SET), 

408 '_ibl_trials.table': trials_table_outcomes, 

409 } 

410 return dataset_outcomes 1thc

411 

412 
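# Minimal usage sketch, assuming `task` is a behaviour task obtained as in the module
# docstring; per the warning above, the QC is created via the task rather than directly.
# >>> qc = task.run_qc(update=False)
# >>> outcome, results = qc.run()
# >>> _, _, outcomes = qc.compute_session_status()
# >>> outcomes['_task_goCue_delays'].name  # e.g. 'PASS'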

413class HabituationQC(TaskQC): 

414 """Task QC for habituation choice world.""" 

415 

416 def compute(self, **kwargs): 

417 """Compute and store the QC metrics. 

418 

419 Runs the QC on the session and stores a map of the metrics for each datapoint for each 

420 test, and a map of which datapoints passed for each test. 

421 """ 

422 assert self.extractor is not None 1gb

423 self.log.info(f'Session {self.session_path}: Running QC on habituation data...') 1gb

424 

425 # Initialize checks 

426 prefix = '_task_' 1gb

427 data = self.extractor.data 1gb

428 audio_output = self.extractor.settings.get('device_sound', {}).get('OUTPUT', 'unknown') 1gb

429 metrics = {} 1gb

430 passed = {} 1gb

431 

432 # Modify criteria based on version 

433 ver = self.extractor.settings.get('IBLRIG_VERSION', '') or '0.0.0' 1gb

434 is_v8 = version.parse(ver) >= version.parse('8.0.0') 1gb

435 self.criteria['_task_iti_delays'] = {'PASS': 0.99, 'WARNING': 0} if is_v8 else {'NOT_SET': 0} 1gb

436 

437 # Check all reward volumes == 3.0ul 

438 check = prefix + 'reward_volumes' 1gb

439 metrics[check] = data['rewardVolume'] 1gb

440 passed[check] = metrics[check] == 3.0 1gb

441 

442 # Check session durations are increasing in steps >= 12 minutes 

443 check = prefix + 'habituation_time' 1gb

444 if not self.one or not self.session_path: 1gb

445 self.log.warning('unable to determine session trials without ONE') 

446 metrics[check] = passed[check] = None 

447 else: 

448 subject, session_date = self.session_path.parts[-3:-1] 1gb

449 # compute from the date specified 

450 date_minus_week = ( 1gb

451 datetime.strptime(session_date, '%Y-%m-%d') - timedelta(days=7) 

452 ).strftime('%Y-%m-%d') 

453 sessions = self.one.alyx.rest('sessions', 'list', subject=subject, 1gb

454 date_range=[date_minus_week, session_date], 

455 task_protocol='habituation') 

456 # Remove the current session if already registered 

457 if sessions and sessions[0]['start_time'].startswith(session_date): 1gb

458 sessions = sessions[1:] 

459 metric = ([0, data['intervals'][-1, 1] - data['intervals'][0, 0]] + 1gb

460 [(datetime.fromisoformat(x['end_time']) - 

461 datetime.fromisoformat(x['start_time'])).total_seconds() 

462 for x in [self.one.alyx.get(s['url']) for s in sessions]]) 

463 

464 # The duration from raw trial data 

465 # duration = map(float, self.extractor.raw_data[-1]['elapsed_time'].split(':')) 

466 # duration = timedelta(**dict(zip(('hours', 'minutes', 'seconds'), 

467 # duration))).total_seconds() 

468 metrics[check] = np.array(metric) / 60 1gb

469 passed[check] = np.diff(metric) >= 12 1gb

470 

471 # Check event orders: trial_start < stim on < stim center < feedback < stim off 

472 check = prefix + 'trial_event_sequence' 1gb

473 nans = ( 1gb

474 np.isnan(data['intervals'][:, 0]) | # noqa 

475 np.isnan(data['stimOn_times']) | # noqa 

476 np.isnan(data['stimCenter_times']) | 

477 np.isnan(data['valveOpen_times']) | # noqa 

478 np.isnan(data['stimOff_times']) 

479 ) 

480 a = np.less(data['intervals'][:, 0], data['stimOn_times'], where=~nans) 1gb

481 b = np.less(data['stimOn_times'], data['stimCenter_times'], where=~nans) 1gb

482 c = np.less(data['stimCenter_times'], data['valveOpen_times'], where=~nans) 1gb

483 d = np.less(data['valveOpen_times'], data['stimOff_times'], where=~nans) 1gb

484 

485 metrics[check] = a & b & c & d & ~nans 1gb

486 passed[check] = metrics[check].astype(float) 1gb

487 

488 # Check that the time difference between the visual stimulus center-command being 

489 # triggered and the stimulus effectively appearing in the center is smaller than 150 ms. 

490 check = prefix + 'stimCenter_delays' 1gb

491 metric = np.nan_to_num(data['stimCenter_times'] - data['stimCenterTrigger_times'], 1gb

492 nan=np.inf) 

493 passed[check] = (metric <= 0.15) & (metric > 0) 1gb

494 metrics[check] = metric 1gb

495 

496 # Phase check 

497 check = prefix + 'phase' 1gb

498 metric = data['phase'] 1gb

499 passed[check] = (metric <= 2 * np.pi) & (metric >= 0) 1gb

500 metrics[check] = metric 1gb

501 

502 # This is not very useful as a check because there are so few trials 

503 check = prefix + 'phase_distribution' 1gb

504 metric, _ = np.histogram(data['phase']) 1gb

505 _, p = chisquare(metric) 1gb

506 passed[check] = p < 0.05 if len(data['phase']) >= 400 else None # skip if too few trials 1gb

507 metrics[check] = metric 1gb

508 

509 # Check that the period of gray screen between stim off and the start of the next trial is 

510 # 1s +/- 10%. 

511 check = prefix + 'iti_delays' 1gb

512 iti = (np.roll(data['stimOn_times'], -1) - data['stimOff_times'])[:-1] 1gb

513 metric = np.r_[np.nan_to_num(iti, nan=np.inf), np.nan] - 1. 1gb

514 passed[check] = np.abs(metric) <= 0.1 1gb

515 passed[check][-1] = np.nan 1gb

516 metrics[check] = metric 1gb

517 

518 # Check stim on go cue delay 

519 # Unlike in ChoiceWorld, the go cue is in a race condition with the stimulus onset and 

520 # therefore usually presents slightly before the stimulus. For now we will keep the QC 

521 # thresholds the same as in ChoiceWorld, which means this check will always fail. 

522 check = prefix + 'stimOn_goCue_delays' 1gb

523 # If either are NaN, the result will be Inf to ensure that it crosses the failure threshold. 

524 threshold = 0.01 if audio_output.lower() == 'harp' else 0.053 1gb

525 metric = np.nan_to_num(data['goCue_times'] - data['stimOn_times'], nan=np.inf) 1gb

526 passed[check] = np.abs(metric) < threshold 1gb

527 metrics[check] = metric 1gb

528 

529 # Checks common to training QC 

530 checks = [check_goCue_delays, check_stimOn_delays, check_stimOff_delays] 1gb

531 for fcn in checks: 1gb

532 check = prefix + fcn.__name__[6:] 1gb

533 metrics[check], passed[check] = fcn(data, audio_output=audio_output) 1gb

534 

535 self.metrics, self.passed = (metrics, passed) 1gb

536 

537 
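# Minimal usage sketch, assuming `task` is a habituation behaviour task; HabituationQC
# only overrides compute(), so it is run in the same way as TaskQC.
# >>> hqc = task.run_qc(update=False)
# >>> outcome, results = hqc.run()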

538# SINGLE METRICS 

539# ---------------------------------------------------------------------------- # 

540 

541# === Delays between events checks === 

542 

543def check_stimOn_goCue_delays(data, audio_output='harp', **_): 

544 """ 

545 Check the go cue tone occurs less than 10ms after the stimulus onset. 

546 

547 Checks that the time difference between the onset of the visual stimulus 

548 and the onset of the go cue tone is positive and less than 10ms. 

549 

550 Metric: 

551 M = goCue_times - stimOn_times 

552 

553 Criteria: 

554 0 < M < 0.010 s 

555 

556 Units: 

557 seconds [s] 

558 

559 :param data: dict of trial data with keys ('goCue_times', 'stimOn_times', 'intervals') 

560 :param audio_output: audio output device name. 

561 

562 Notes 

563 ----- 

564 - For non-harp sound card the permissible delay is 0.053s. This was chosen by taking the 99.5th 

565 percentile of delays over 500 training sessions using the Xonar soundcard. 

566 """ 

567 # Calculate the difference between stimOn and goCue times. 

568 # If either are NaN, the result will be Inf to ensure that it crosses the failure threshold. 

569 threshold = 0.01 if audio_output.lower() == 'harp' else 0.053 1aBcbdfe

570 metric = np.nan_to_num(data['goCue_times'] - data['stimOn_times'], nan=np.inf) 1aBcbdfe

571 passed = (metric < threshold) & (metric > 0) 1aBcbdfe

572 assert data['intervals'].shape[0] == len(metric) == len(passed) 1aBcbdfe

573 return metric, passed 1aBcbdfe

574 
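# Illustrative sketch: this check applied to a tiny synthetic two-trial data dict; all
# values are hypothetical.
# >>> data = {'intervals': np.array([[0., 5.], [6., 11.]]),
# ...         'stimOn_times': np.array([1.0, 7.0]),
# ...         'goCue_times': np.array([1.005, 7.2])}
# >>> metric, passed = check_stimOn_goCue_delays(data, audio_output='harp')
# >>> passed.tolist()  # the second trial exceeds the 10 ms threshold
# [True, False]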

575 

576def check_response_feedback_delays(data, audio_output='harp', **_): 

577 """ 

578 Check that feedback is delivered within 10ms of the response. 

579 

580 Checks that the time difference between the response and the feedback onset 

581 (error sound or valve) is positive and less than 10ms. 

582 

583 Metric: 

584 M = feedback_time - response_time 

585 

586 Criterion: 

587 0 < M < 0.010 s 

588 

589 Units: 

590 seconds [s] 

591 

592 :param data: dict of trial data with keys ('feedback_times', 'response_times', 'intervals') 

593 :param audio_output: audio output device name. 

594 

595 Notes 

596 ----- 

597 - For non-harp sound card the permissible delay is 0.053s. This was chosen by taking the 99.5th 

598 percentile of delays over 500 training sessions using the Xonar soundcard. 

599 """ 

600 threshold = 0.01 if audio_output.lower() == 'harp' else 0.053 1aCcbdfe

601 metric = np.nan_to_num(data['feedback_times'] - data['response_times'], nan=np.inf) 1aCcbdfe

602 passed = (metric < threshold) & (metric > 0) 1aCcbdfe

603 assert data['intervals'].shape[0] == len(metric) == len(passed) 1aCcbdfe

604 return metric, passed 1aCcbdfe

605 

606 

607def check_response_stimFreeze_delays(data, **_): 

608 """ 

609 Check the stimulus freezes within 100ms of the expected time. 

610 

611 Checks that the time difference between the visual stimulus freezing and the 

612 response is positive and less than 100ms. 

613 

614 Metric: 

615 M = (stimFreeze_times - response_times) 

616 

617 Criterion: 

618 0 < M < 0.100 s 

619 

620 Units: 

621 seconds [s] 

622 

623 :param data: dict of trial data with keys ('stimFreeze_times', 'response_times', 'intervals', 

624 'choice') 

625 """ 

626 # Calculate the difference between the stimulus freeze and response times. 

627 # If either are NaN, the result will be Inf to ensure that it crosses the failure threshold. 

628 metric = np.nan_to_num(data['stimFreeze_times'] - data['response_times'], nan=np.inf) 1aDcbdfe

629 # Test for valid values 

630 passed = ((metric < 0.1) & (metric > 0)).astype(float) 1aDcbdfe

631 # Finally remove no_go trials (stimFreeze triggered differently in no_go trials) 

632 # These values are ignored in calculation of proportion passed 

633 passed[data['choice'] == 0] = np.nan 1aDcbdfe

634 assert data['intervals'].shape[0] == len(metric) == len(passed) 1aDcbdfe

635 return metric, passed 1aDcbdfe

636 

637 

638def check_stimOff_itiIn_delays(data, **_): 

639 """Check that the start of the trial interval is within 10ms of the visual stimulus turning off. 

640 

641 Metric: 

642 M = itiIn_times - stimOff_times 

643 

644 Criterion: 

645 0 < M < 0.010 s 

646 

647 Units: 

648 seconds [s] 

649 

650 :param data: dict of trial data with keys ('stimOff_times', 'itiIn_times', 'intervals', 

651 'choice') 

652 """ 

653 # If either are NaN, the result will be Inf to ensure that it crosses the failure threshold. 

654 metric = np.nan_to_num(data['itiIn_times'] - data['stimOff_times'], nan=np.inf) 1aEcbdfe

655 passed = ((metric < 0.01) & (metric >= 0)).astype(float) 1aEcbdfe

656 # Remove no_go trials (stimOff triggered differently in no_go trials) 

657 # NaN values are ignored in calculation of proportion passed 

658 metric[data['choice'] == 0] = passed[data['choice'] == 0] = np.nan 1aEcbdfe

659 assert data['intervals'].shape[0] == len(metric) == len(passed) 1aEcbdfe

660 return metric, passed 1aEcbdfe

661 
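# Illustrative sketch on two hypothetical trials, neither of which is a no-go:
# >>> data = {'intervals': np.array([[0., 5.], [6., 11.]]),
# ...         'stimOff_times': np.array([4.0, 10.0]),
# ...         'itiIn_times': np.array([4.005, 10.05]),
# ...         'choice': np.array([1, -1])}
# >>> metric, passed = check_stimOff_itiIn_delays(data)
# >>> passed.tolist()  # the second ITI starts 50 ms late
# [1.0, 0.0]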

662 

663def check_iti_delays(data, subtract_pauses=False, iti_delay_secs=ITI_DELAY_SECS, 

664 feedback_nogo_delay_secs=FEEDBACK_NOGO_DELAY_SECS, **_): 

665 """ 

666 Check the grey screen period between trials is approximately the expected ITI duration. 

667 

668 Check that the period of grey screen between stim off and the start of the next trial is 

669 iti_delay_secs +/- 10% (0.5 s by default). If the trial was paused during this time, the check can account for that (see subtract_pauses). 

670 

671 Metric: 

672 M = trialStart (n+1) - stimOff (n) - iti_delay_secs 

673 

674 Criterion: 

675 |M| < 0.1 * iti_delay_secs 

676 

677 Units: 

678 seconds [s] 

679 

680 Parameters 

681 ---------- 

682 data : dict 

683 Trial data with keys ('stimOff_times', 'intervals', 'choice', 'pause_duration'). 

684 subtract_pauses: bool 

685 If True, account for experimenter-initiated pauses between trials; if False, trials where 

686 the experimenter paused the task may fail this check. 

687 

688 Returns 

689 ------- 

690 numpy.array 

691 An array of metric values to threshold. 

692 numpy.array 

693 An array of boolean values, 1 per trial, where True means trial passes QC threshold. 

694 """ 

695 # Initialize array the length of completed trials 

696 metric = np.full(data['intervals'].shape[0], np.nan) 1aucbdfe

697 passed = metric.copy() 1aucbdfe

698 pauses = (data['pause_duration'] if subtract_pauses else np.zeros_like(metric))[:-1] 1aucbdfe

699 # Get the difference between stim off and the start of the next trial 

700 # Missing data are set to Inf, except for the last trial which is a NaN 

701 metric[:-1] = np.nan_to_num( 1aucbdfe

702 data['intervals'][1:, 0] - data['stimOff_times'][:-1] - iti_delay_secs - pauses, 

703 nan=np.inf 

704 ) 

705 metric[data['choice'] == 0] = metric[data['choice'] == 0] - feedback_nogo_delay_secs 1aucbdfe

706 passed[:-1] = np.abs(metric[:-1]) < (iti_delay_secs / 10) # Last trial is not counted 1aucbdfe

707 assert data['intervals'].shape[0] == len(metric) == len(passed) 1aucbdfe

708 return metric, passed 1aucbdfe

709 
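# Illustrative sketch on two hypothetical trials with the default ITI_DELAY_SECS of 0.5 s;
# 'pause_duration' is only required when subtract_pauses=True.
# >>> data = {'intervals': np.array([[0., 5.], [6., 11.]]),
# ...         'stimOff_times': np.array([5.49, 10.6]),
# ...         'choice': np.array([1, 1])}
# >>> metric, passed = check_iti_delays(data)
# >>> passed.tolist()  # the last trial is never scored
# [1.0, nan]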

710 

711def check_positive_feedback_stimOff_delays(data, **_): 

712 """ 

713 Check stimulus offset occurs approximately 1 second after reward delivered. 

714 

715 Check that the time difference between the valve onset and the visual stimulus turning off 

716 is 1 ± 0.150 seconds. 

717 

718 Metric: 

719 M = stimOff_times - feedback_times - 1s 

720 

721 Criterion: 

722 |M| < 0.150 s 

723 

724 Units: 

725 seconds [s] 

726 

727 :param data: dict of trial data with keys ('stimOff_times', 'feedback_times', 'intervals', 

728 'correct') 

729 """ 

730 # If either are NaN, the result will be Inf to ensure that it crosses the failure threshold. 

731 metric = np.nan_to_num(data['stimOff_times'] - data['feedback_times'] - 1, nan=np.inf) 1aFcbdfe

732 passed = (np.abs(metric) < 0.15).astype(float) 1aFcbdfe

733 # NaN values are ignored in calculation of proportion passed; ignore incorrect trials here 

734 metric[~data['correct']] = passed[~data['correct']] = np.nan 1aFcbdfe

735 assert data['intervals'].shape[0] == len(metric) == len(passed) 1aFcbdfe

736 return metric, passed 1aFcbdfe

737 

738 

739def check_negative_feedback_stimOff_delays(data, feedback_nogo_delay_secs=FEEDBACK_NOGO_DELAY_SECS, **_): 

740 """ 

741 Check the stimulus offset occurs approximately 2 seconds after negative feedback delivery. 

742 

743 Check that the time difference between the error sound and the visual stimulus 

744 turning off is 2 ± 0.150 seconds. 

745 

746 Metric: 

747 M = stimOff_times - errorCue_times - 2s 

748 

749 Criterion: 

750 |M| < 0.150 s 

751 

752 Units: 

753 seconds [s] 

754 

755 :param data: dict of trial data with keys ('stimOff_times', 'errorCue_times', 'intervals') 

756 """ 

757 metric = np.nan_to_num(data['stimOff_times'] - data['errorCue_times'] - 2, nan=np.inf) 1azcbdfe

758 # for the nogo trials, the feedback is the same as the stimOff 

759 metric[data['choice'] == 0] = metric[data['choice'] == 0] + feedback_nogo_delay_secs 1azcbdfe

760 # Apply criteria 

761 passed = (np.abs(metric) < 0.15).astype(float) 1azcbdfe

762 # Remove non-negative-feedback (i.e. correct) trials 

763 metric[data['correct']] = passed[data['correct']] = np.nan 1azcbdfe

764 assert data['intervals'].shape[0] == len(metric) == len(passed) 1azcbdfe

765 return metric, passed 1azcbdfe

766 

767 

768# === Wheel movement during trial checks === 

769 

770def check_wheel_move_before_feedback(data, **_): 

771 """Check that the wheel does move within 100ms of the feedback onset (error sound or valve). 

772 

773 Metric: 

774 M = w(t + 0.05) - w(t - 0.05), where t = feedback_times 

775 

776 Criterion: 

777 M != 0 

778 

779 Units: 

780 radians 

781 

782 :param data: dict of trial data with keys ('wheel_timestamps', 'wheel_position', 'choice', 

783 'intervals', 'feedback_times') 

784 """ 

785 # Get tuple of wheel times and positions within 100ms of feedback 

786 traces = traces_by_trial( 1aocbdfe

787 data['wheel_timestamps'], 

788 data['wheel_position'], 

789 start=data['feedback_times'] - 0.05, 

790 end=data['feedback_times'] + 0.05, 

791 ) 

792 metric = np.zeros_like(data['feedback_times']) 1aocbdfe

793 # For each trial find the displacement 

794 for i, trial in enumerate(traces): 1aocbdfe

795 pos = trial[1] 1aocbdfe

796 if pos.size > 1: 1aocbdfe

797 metric[i] = pos[-1] - pos[0] 1aocbdfe

798 

799 # except no-go trials 

800 metric[data['choice'] == 0] = np.nan # NaN = trial ignored for this check 1aocbdfe

801 nans = np.isnan(metric) 1aocbdfe

802 passed = np.zeros_like(metric) * np.nan 1aocbdfe

803 

804 passed[~nans] = (metric[~nans] != 0).astype(float) 1aocbdfe

805 assert data['intervals'].shape[0] == len(metric) == len(passed) 1aocbdfe

806 return metric, passed 1aocbdfe

807 
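# Illustrative sketch: one hypothetical trial where the wheel moves around feedback time,
# so the displacement is non-zero and the trial passes.
# >>> data = {'intervals': np.array([[0., 3.]]),
# ...         'feedback_times': np.array([2.0]),
# ...         'choice': np.array([1]),
# ...         'wheel_timestamps': np.array([1.96, 1.98, 2.00, 2.02, 2.04]),
# ...         'wheel_position': np.array([0.00, 0.01, 0.02, 0.03, 0.04])}
# >>> metric, passed = check_wheel_move_before_feedback(data)
# >>> passed.tolist()
# [1.0]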

808 

809def _wheel_move_during_closed_loop(re_ts, re_pos, data, wheel_gain=None, tol=1, **_): 

810 """ 

811 Check the wheel moves the correct amount to reach threshold. 

812 

813 Check that the wheel moves by approximately 35 degrees during the closed-loop period 

814 on trials where a feedback (error sound or valve) is delivered. 

815 

816 Metric: 

817 M = abs(w_resp - w_t0) - threshold_displacement, where w_resp = position at response 

818 time, w_t0 = position at go cue time, threshold_displacement = displacement required to 

819 move 35 visual degrees 

820 

821 Criterion: 

822 displacement < tol visual degree 

823 

824 Units: 

825 degrees angle of wheel turn 

826 

827 :param re_ts: extracted wheel timestamps in seconds 

828 :param re_pos: extracted wheel positions in radians 

829 :param data: a dict with the keys (goCueTrigger_times, response_times, feedback_times, 

830 position, choice, intervals) 

831 :param wheel_gain: the 'STIM_GAIN' task setting 

832 :param tol: the criterion in visual degrees 

833 """ 

834 if wheel_gain is None: 1ajcbdfe

835 _log.warning('No wheel_gain input in function call, returning None') 

836 return None, None 

837 

838 # Get tuple of wheel times and positions over each trial's closed-loop period 

839 traces = traces_by_trial(re_ts, re_pos, 1ajcbdfe

840 start=data['goCueTrigger_times'], 

841 end=data['response_times']) 

842 

843 metric = np.zeros_like(data['feedback_times']) 1ajcbdfe

844 # For each trial find the absolute displacement 

845 for i, trial in enumerate(traces): 1ajcbdfe

846 t, pos = trial 1ajcbdfe

847 if pos.size != 0: 1ajcbdfe

848 # Find the position of the preceding sample and subtract it 

849 idx = np.abs(re_ts - t[0]).argmin() - 1 1ajcbdfe

850 origin = re_pos[idx] 1ajcbdfe

851 metric[i] = np.abs(pos - origin).max() 1ajcbdfe

852 

853 # Load wheel_gain and thresholds for each trial 

854 wheel_gain = np.array([wheel_gain] * len(data['position'])) 1ajcbdfe

855 thresh = data['position'] 1ajcbdfe

856 # abs displacement, s, in mm required to move 35 visual degrees 

857 s_mm = np.abs(thresh / wheel_gain) # don't care about direction 1ajcbdfe

858 criterion = cm_to_rad(s_mm * 1e-1) # convert abs displacement to radians (wheel pos is in rad) 1ajcbdfe

859 metric = metric - criterion # difference should be close to 0 1ajcbdfe

860 rad_per_deg = cm_to_rad(1 / wheel_gain * 1e-1) 1ajcbdfe

861 passed = (np.abs(metric) < rad_per_deg * tol).astype(float) # less than 1 visual degree off 1ajcbdfe

862 metric[data['choice'] == 0] = passed[data['choice'] == 0] = np.nan # except no-go trials 1ajcbdfe

863 assert data['intervals'].shape[0] == len(metric) == len(passed) 1ajcbdfe

864 return metric, passed 1ajcbdfe

865 

866 

867def check_wheel_move_during_closed_loop(data, wheel_gain=None, **_): 

868 """ 

869 Check the wheel moves the correct amount to reach threshold. 

870 

871 Check that the wheel moves by approximately 35 degrees during the closed-loop period 

872 on trials where a feedback (error sound or valve) is delivered. 

873 

874 Metric: 

875 M = abs(w_resp - w_t0) - threshold_displacement, where w_resp = position at response 

876 time, w_t0 = position at go cue time, threshold_displacement = displacement required to 

877 move 35 visual degrees 

878 

879 Criterion: 

880 displacement < 3 visual degrees 

881 

882 Units: 

883 degrees angle of wheel turn 

884 

885 :param data: dict of trial data with keys ('wheel_timestamps', 'wheel_position', 'choice', 

886 'intervals', 'goCueTrigger_times', 'response_times', 'feedback_times', 'position') 

887 :param wheel_gain: the 'STIM_GAIN' task setting 

888 """ 

889 # Get the Bpod extracted wheel data 

890 timestamps = data['wheel_timestamps'] 1ajcbdfe

891 position = data['wheel_position'] 1ajcbdfe

892 

893 return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=3) 1ajcbdfe

894 

895 

896def check_wheel_move_during_closed_loop_bpod(data, wheel_gain=None, **_): 

897 """ 

898 Check the wheel moves the correct amount to reach threshold. 

899 

900 Check that the wheel moves by approximately 35 degrees during the closed-loop period 

901 on trials where a feedback (error sound or valve) is delivered. This check uses the Bpod 

902 wheel data (measured at a lower resolution) with a stricter tolerance (1 visual degree). 

903 

904 Metric: 

905 M = abs(w_resp - w_t0) - threshold_displacement, where w_resp = position at response 

906 time, w_t0 = position at go cue time, threshold_displacement = displacement required to 

907 move 35 visual degrees. 

908 

909 Criterion: 

910 displacement < 1 visual degree 

911 

912 Units: 

913 degrees angle of wheel turn 

914 

915 :param data: dict of trial data with keys ('wheel_timestamps(_bpod)', 'wheel_position(_bpod)', 

916 'choice', 'intervals', 'goCueTrigger_times', 'response_times', 'feedback_times', 'position') 

917 :param wheel_gain: the 'STIM_GAIN' task setting 

918 """ 

919 # Get the Bpod extracted wheel data 

920 timestamps = data.get('wheel_timestamps_bpod', data['wheel_timestamps']) 1acbdfe

921 position = data.get('wheel_position_bpod', data['wheel_position']) 1acbdfe

922 

923 return _wheel_move_during_closed_loop(timestamps, position, data, wheel_gain, tol=1) 1acbdfe

924 

925 

926def check_wheel_freeze_during_quiescence(data, **_): 

927 """ 

928 Check the wheel is indeed still during the quiescent period. 

929 

930 Check that the wheel does not move more than 2 degrees in each direction during the 

931 quiescence interval before the stimulus appears. 

932 

933 Metric: 

934 M = |max(W) - min(W)| where W is wheel pos over quiescence interval; 

935 interval = [stimOnTrigger_times - quiescent_duration, stimOnTrigger_times] 

936 

937 Criterion: 

938 M < 2 degrees 

939 

940 Units: 

941 degrees angle of wheel turn 

942 

943 :param data: dict of trial data with keys ('wheel_timestamps', 'wheel_position', 'quiescence', 

944 'intervals', 'stimOnTrigger_times') 

945 """ 

946 assert np.all(np.diff(data['wheel_timestamps']) >= 0) 1akcbdfe

947 assert data['quiescence'].size == data['stimOnTrigger_times'].size 1akcbdfe

948 # Get tuple of wheel times and positions over each trial's quiescence period 

949 qevt_start_times = data['stimOnTrigger_times'] - data['quiescence'] 1akcbdfe

950 traces = traces_by_trial( 1akcbdfe

951 data['wheel_timestamps'], 

952 data['wheel_position'], 

953 start=qevt_start_times, 

954 end=data['stimOnTrigger_times'] 

955 ) 

956 

957 metric = np.zeros((len(data['quiescence']), 2)) # (n_trials, n_directions) 1akcbdfe

958 for i, trial in enumerate(traces): 1akcbdfe

959 t, pos = trial 1akcbdfe

960 # Get the last position before the period began 

961 if pos.size > 0: 1akcbdfe

962 # Find the position of the preceding sample and subtract it 

963 idx = np.abs(data['wheel_timestamps'] - t[0]).argmin() - 1 1akcbdfe

964 origin = data['wheel_position'][idx if idx != -1 else 0] 1akcbdfe

965 # Find the absolute min and max relative to the last sample 

966 metric[i, :] = np.abs([np.min(pos - origin), np.max(pos - origin)]) 1akcbdfe

967 # Reduce to the largest displacement found in any direction 

968 metric = np.max(metric, axis=1) 1akcbdfe

969 metric = 180 * metric / np.pi # convert to degrees from radians 1akcbdfe

970 criterion = 2 # Position shouldn't change more than 2 degrees in either direction 1akcbdfe

971 passed = metric < criterion 1akcbdfe

972 assert data['intervals'].shape[0] == len(metric) == len(passed) 1akcbdfe

973 return metric, passed 1akcbdfe

974 

975 

976def check_detected_wheel_moves(data, min_qt=0, **_): 

977 """Check that the detected first movement times are reasonable. 

978 

979 Metric: 

980 M = firstMovement times 

981 

982 Criterion: 

983 (goCue trigger time - min quiescent period) < M < response time 

984 

985 Units: 

986 Seconds [s] 

987 

988 :param data: dict of trial data with keys ('firstMovement_times', 'goCueTrigger_times', 

989 'response_times', 'choice', 'intervals') 

990 :param min_qt: the minimum possible quiescent period (the QUIESCENT_PERIOD task parameter) 

991 """ 

992 # Depending on task version this may be a single value or an array of quiescent periods 

993 min_qt = np.array(min_qt) 1ascbdfe

994 if min_qt.size > data['intervals'].shape[0]: 1ascbdfe

995 min_qt = min_qt[:data['intervals'].shape[0]] 1d

996 

997 metric = data['firstMovement_times'] 1ascbdfe

998 qevt_start = data['goCueTrigger_times'] - np.array(min_qt) 1ascbdfe

999 response = data['response_times'] 1ascbdfe

1000 # First movement time for each trial should be after the quiescent period and before feedback 

1001 passed = np.array([a < m < b for m, a, b in zip(metric, qevt_start, response)], dtype=float) 1ascbdfe

1002 nogo = data['choice'] == 0 1ascbdfe

1003 passed[nogo] = np.nan # No go trial may have no movement times and that's fine 1ascbdfe

1004 return metric, passed 1ascbdfe

1005 
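# Illustrative sketch on two hypothetical trials with a 0.2 s minimum quiescent period:
# >>> data = {'intervals': np.array([[0., 5.], [6., 11.]]),
# ...         'goCueTrigger_times': np.array([1.0, 7.0]),
# ...         'firstMovement_times': np.array([1.2, 6.5]),
# ...         'response_times': np.array([2.0, 8.0]),
# ...         'choice': np.array([1, -1])}
# >>> metric, passed = check_detected_wheel_moves(data, min_qt=0.2)
# >>> passed.tolist()  # the second movement starts during the quiescent period
# [1.0, 0.0]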

1006 

1007# === Sequence of events checks === 

1008 

1009def check_error_trial_event_sequence(data, **_): 

1010 """ 

1011 Check trial events occur in correct order for negative feedback trials. 

1012 

1013 Check that on incorrect / miss trials, there are exactly: 

1014 2 audio events (go cue sound and error sound) and 2 Bpod events (trial start, ITI), occurring 

1015 in the correct order 

1016 

1017 Metric: 

1018 M = Bpod (trial start) > audio (go cue) > audio (error) > Bpod (ITI) > Bpod (trial end) 

1019 

1020 Criterion: 

1021 M == True 

1022 

1023 Units: 

1024 -none- 

1025 

1026 :param data: dict of trial data with keys ('errorCue_times', 'goCue_times', 'intervals', 

1027 'itiIn_times', 'correct') 

1028 """ 

1029 # An array the length of N trials where True means at least one event time was NaN (bad) 

1030 nans = ( 1aqcbdfe

1031 np.isnan(data['intervals'][:, 0]) | 

1032 np.isnan(data['goCue_times']) | # noqa 

1033 np.isnan(data['errorCue_times']) | # noqa 

1034 np.isnan(data['itiIn_times']) | # noqa 

1035 np.isnan(data['intervals'][:, 1]) 

1036 ) 

1037 

1038 # For each trial check that the events happened in the correct order (ignore NaN values) 

1039 a = np.less(data['intervals'][:, 0], data['goCue_times'], where=~nans) # Start time < go cue 1aqcbdfe

1040 b = np.less(data['goCue_times'], data['errorCue_times'], where=~nans) # Go cue < error cue 1aqcbdfe

1041 c = np.less(data['errorCue_times'], data['itiIn_times'], where=~nans) # Error cue < ITI start 1aqcbdfe

1042 d = np.less(data['itiIn_times'], data['intervals'][:, 1], where=~nans) # ITI start < end time 1aqcbdfe

1043 

1044 # For each trial check all events were in order AND all event times were not NaN 

1045 metric = a & b & c & d & ~nans 1aqcbdfe

1046 

1047 passed = metric.astype(float) 1aqcbdfe

1048 passed[data['correct']] = np.nan # Look only at incorrect trials 1aqcbdfe

1049 assert data['intervals'].shape[0] == len(metric) == len(passed) 1aqcbdfe

1050 return metric, passed 1aqcbdfe

1051 

1052 

1053def check_correct_trial_event_sequence(data, **_): 

1054 """ 

1055 Check trial events occur in correct order for positive feedback trials. 

1056 

1057 Check that on correct trials, there are exactly: 

1058 1 audio event and 3 Bpod events (valve open, trial start, ITI), occurring in the correct order 

1059 

1060 Metric: 

1061 M = Bpod (trial start) > audio (go cue) > Bpod (valve) > Bpod (ITI) > Bpod (trial end) 

1062 

1063 Criterion: 

1064 M == True 

1065 

1066 Units: 

1067 -none- 

1068 

1069 :param data: dict of trial data with keys ('valveOpen_times', 'goCue_times', 'intervals', 

1070 'itiIn_times', 'correct') 

1071 """ 

1072 # An array the length of N trials where True means at least one event time was NaN (bad) 

1073 nans = ( 1arcbdfe

1074 np.isnan(data['intervals'][:, 0]) | 

1075 np.isnan(data['goCue_times']) | # noqa 

1076 np.isnan(data['valveOpen_times']) | 

1077 np.isnan(data['itiIn_times']) | # noqa 

1078 np.isnan(data['intervals'][:, 1]) 

1079 ) 

1080 

1081 # For each trial check that the events happened in the correct order (ignore NaN values) 

1082 a = np.less(data['intervals'][:, 0], data['goCue_times'], where=~nans) # Start time < go cue 1arcbdfe

1083 b = np.less(data['goCue_times'], data['valveOpen_times'], where=~nans) # Go cue < feedback 1arcbdfe

1084 c = np.less(data['valveOpen_times'], data['itiIn_times'], where=~nans) # Feedback < ITI start 1arcbdfe

1085 d = np.less(data['itiIn_times'], data['intervals'][:, 1], where=~nans) # ITI start < end time 1arcbdfe

1086 

1087 # For each trial True means all events were in order AND all event times were not NaN 

1088 metric = a & b & c & d & ~nans 1arcbdfe

1089 

1090 passed = metric.astype(float) 1arcbdfe

1091 passed[~data['correct']] = np.nan # Look only at correct trials 1arcbdfe

1092 assert data['intervals'].shape[0] == len(metric) == len(passed) 1arcbdfe

1093 return metric, passed 1arcbdfe

1094 
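# Illustrative sketch on two hypothetical correct trials:
# >>> data = {'intervals': np.array([[0., 5.], [6., 11.]]),
# ...         'goCue_times': np.array([1.0, 7.0]),
# ...         'valveOpen_times': np.array([2.0, 6.5]),
# ...         'itiIn_times': np.array([3.0, 9.0]),
# ...         'correct': np.array([True, True])}
# >>> metric, passed = check_correct_trial_event_sequence(data)
# >>> passed.tolist()  # the valve opened before the go cue on the second trial
# [1.0, 0.0]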

1095 

1096def check_n_trial_events(data, **_): 

1097 """Check that the number events per trial is correct. 

1098 

1099 Within every trial interval there should be one of each trial event, except for 

1100 errorCueTrigger_times, which should only be defined for incorrect trials 

1101 

1102 Metric: 

1103 M = all(start < event < end) for all event times except errorCueTrigger_times where 

1104 start < error_trigger < end if not correct trial, else error_trigger == NaN 

1105 

1106 Criterion: 

1107 M == True 

1108 

1109 Units: 

1110 -none-, boolean 

1111 

1112 :param data: dict of trial data with keys ('intervals', 'stimOnTrigger_times', 

1113 'stimOffTrigger_times', 'stimOn_times', 'stimOff_times', 

1114 'stimFreezeTrigger_times', 'errorCueTrigger_times', 'itiIn_times', 

1115 'goCueTrigger_times', 'goCue_times', 'response_times', 'feedback_times') 

1116 """ 

1117 intervals = data['intervals'] 1apcbdfe

1118 correct = data['correct'] 1apcbdfe

1119 err_trig = data['errorCueTrigger_times'] 1apcbdfe

1120 

1121 # Exclude these fields; valve and errorCue times are the same as feedback_times and we must 

1122 # test errorCueTrigger_times separately 

1123 # stimFreeze_times fails often due to TTL flicker 

1124 exclude = ['camera_timestamps', 'errorCueTrigger_times', 'errorCue_times', 1apcbdfe

1125 'wheelMoves_peakVelocity_times', 'valveOpen_times', 'wheelMoves_peakAmplitude', 

1126 'wheelMoves_intervals', 'wheel_timestamps', 'stimFreeze_times'] 

1127 events = [k for k in data.keys() if k.endswith('_times') and k not in exclude] 1apcbdfe

1128 metric = np.zeros(data['intervals'].shape[0], dtype=bool) 1apcbdfe

1129 

1130 # For each trial interval check that one of each trial event occurred. For incorrect trials, 

1131 # check the error cue trigger occurred within the interval, otherwise check it is nan. 

1132 for i, (start, end) in enumerate(intervals): 1apcbdfe

1133 metric[i] = (all([start < data[k][i] < end for k in events]) and 1apcbdfe

1134 (np.isnan(err_trig[i]) if correct[i] else start < err_trig[i] < end)) 

1135 passed = metric.astype(bool) 1apcbdfe

1136 assert intervals.shape[0] == len(metric) == len(passed) 1apcbdfe

1137 return metric, passed 1apcbdfe

1138 

1139 

1140def check_trial_length(data, **_): 

1141 """ 

1142 Check the open-loop duration is positive and <= 1 minute. 

1143 

1144 Check that the time difference between the onset of the go cue sound 

1145 and the feedback (error sound or valve) is positive and smaller than 60.1 s. 

1146 

1147 Metric: 

1148 M = feedback_times - goCue_times 

1149 

1150 Criteria: 

1151 0 < M < 60.1 s 

1152 

1153 Units: 

1154 seconds [s] 

1155 

1156 :param data: dict of trial data with keys ('feedback_times', 'goCue_times', 'intervals') 

1157 """ 

1158 # NaN values are usually ignored so replace them with Inf so they fail the threshold 

1159 metric = np.nan_to_num(data['feedback_times'] - data['goCue_times'], nan=np.inf) 1aHcbdfe

1160 passed = (metric < 60.1) & (metric > 0) 1aHcbdfe

1161 assert data['intervals'].shape[0] == len(metric) == len(passed) 1aHcbdfe

1162 return metric, passed 1aHcbdfe

1163 
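# Illustrative sketch on two hypothetical trials; a missing feedback time becomes inf and
# therefore fails.
# >>> data = {'intervals': np.zeros((2, 2)),
# ...         'goCue_times': np.array([1.0, 7.0]),
# ...         'feedback_times': np.array([3.5, np.nan])}
# >>> metric, passed = check_trial_length(data)
# >>> passed.tolist()
# [True, False]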

1164 

1165# === Trigger-response delay checks === 

1166 

1167def check_goCue_delays(data, audio_output='harp', **_): 

1168 """ 

1169 Check the go cue tone occurs within 1.5ms of the intended time. 

1170 

1171 Check that the time difference between the go cue sound being triggered and 

1172 effectively played is smaller than 1.5ms. 

1173 

1174 Metric: 

1175 M = goCue_times - goCueTrigger_times 

1176 

1177 Criterion: 

1178 0 < M <= 0.0015 s 

1179 

1180 Units: 

1181 seconds [s] 

1182 

1183 :param data: dict of trial data with keys ('goCue_times', 'goCueTrigger_times', 'intervals'). 

1184 :param audio_output: audio output device name. 

1185 

1186 Notes 

1187 ----- 

1188 - For non-harp sound card the permissible delay is 0.053s. This was chosen by taking the 99.5th 

1189 percentile of delays over 500 training sessions using the Xonar soundcard. 

1190 """ 

1191 threshold = 0.0015 if audio_output.lower() == 'harp' else 0.053 1agGcbdfe

1192 metric = np.nan_to_num(data['goCue_times'] - data['goCueTrigger_times'], nan=np.inf) 1agGcbdfe

1193 passed = (metric <= threshold) & (metric > 0) 1agGcbdfe

1194 assert data['intervals'].shape[0] == len(metric) == len(passed) 1agGcbdfe

1195 return metric, passed 1agGcbdfe

1196 

1197 

1198def check_errorCue_delays(data, audio_output='harp', **_): 

1199 """ 

1200 Check the error tone occurs within 1.5ms of the intended time. 

1201 

1202 Check that the time difference between the error sound being triggered and 

1203 effectively played is smaller than 1.5ms. 

1204 

1205 Metric: 

1206 M = errorCue_times - errorCueTrigger_times 

1207 

1208 Criterion: 

1209 0 < M <= 0.0015 s 

1210 

1211 Units: 

1212 seconds [s] 

1213 

1214 :param data: dict of trial data with keys ('errorCue_times', 'errorCueTrigger_times', 

1215 'intervals', 'correct') 

1216 :param audio_output: audio output device name. 

1217 

1218 Notes 

1219 ----- 

1220 - For non-harp sound card the permissible delay is 0.062s. This was chosen by taking the 99.5th 

1221 percentile of delays over 500 training sessions using the Xonar soundcard. 

1222 """ 

1223 threshold = 0.0015 if audio_output.lower() == 'harp' else 0.062 1aAcbdfe

1224 metric = np.nan_to_num(data['errorCue_times'] - data['errorCueTrigger_times'], nan=np.inf) 1aAcbdfe

1225 passed = ((metric <= threshold) & (metric > 0)).astype(float) 1aAcbdfe

1226 passed[data['correct']] = metric[data['correct']] = np.nan 1aAcbdfe

1227 assert data['intervals'].shape[0] == len(metric) == len(passed) 1aAcbdfe

1228 return metric, passed 1aAcbdfe

1229 

1230 

1231def check_stimOn_delays(data, **_): 

1232 """ 

1233 Check the visual stimulus onset occurs within 150ms of the intended time. 

1234 

1235 Check that the time difference between the visual stimulus onset-command being triggered 

1236 and the stimulus effectively appearing on the screen is smaller than 150 ms. 

1237 

1238 Metric: 

1239 M = stimOn_times - stimOnTrigger_times 

1240 

1241 Criterion: 

1242 0 < M < 0.15 s 

1243 

1244 Units: 

1245 seconds [s] 

1246 

1247 :param data: dict of trial data with keys ('stimOn_times', 'stimOnTrigger_times', 

1248 'intervals') 

1249 """ 

1250 metric = np.nan_to_num(data['stimOn_times'] - data['stimOnTrigger_times'], nan=np.inf) 1agIcbdfe

1251 passed = (metric <= 0.15) & (metric > 0) 1agIcbdfe

1252 assert data['intervals'].shape[0] == len(metric) == len(passed) 1agIcbdfe

1253 return metric, passed 1agIcbdfe

1254 

1255 

1256def check_stimOff_delays(data, **_): 

1257 """ 

1258 Check stimulus offset occurs within 150ms of the intended time. 

1259 

1260 Check that the time difference between the visual stimulus offset-command 

1261 being triggered and the visual stimulus effectively turning off on the screen 

1262 is smaller than 150 ms. 

1263 

1264 Metric: 

1265 M = stimOff_times - stimOffTrigger_times 

1266 

1267 Criterion: 

1268 0 < M < 0.15 s 

1269 

1270 Units: 

1271 seconds [s] 

1272 

1273 :param data: dict of trial data with keys ('stimOff_times', 'stimOffTrigger_times', 

1274 'intervals') 

1275 """ 

1276 metric = np.nan_to_num(data['stimOff_times'] - data['stimOffTrigger_times'], nan=np.inf) 1agJcbdfe

1277 passed = (metric <= 0.15) & (metric > 0) 1agJcbdfe

1278 assert data['intervals'].shape[0] == len(metric) == len(passed) 1agJcbdfe

1279 return metric, passed 1agJcbdfe

1280 

1281 

1282def check_stimFreeze_delays(data, **_): 

1283 """Check the stimulus freezes within 150ms of the intended time. 

1284 

1285 Check that the time difference between the visual stimulus freeze-command 

1286 being triggered and the visual stimulus effectively freezing on the screen 

1287 is smaller than 150 ms. 

1288 

1289 Metric: 

1290 M = stimFreeze_times - stimFreezeTrigger_times 

1291 

1292 Criterion: 

1293 0 < M < 0.15 s 

1294 

1295 Units: 

1296 seconds [s] 

1297 

1298 :param data: dict of trial data with keys ('stimFreeze_times', 'stimFreezeTrigger_times', 

1299 'intervals') 

1300 """ 

1301 metric = np.nan_to_num(data['stimFreeze_times'] - data['stimFreezeTrigger_times'], nan=np.inf) 1aKcbdfe

1302 passed = (metric <= 0.15) & (metric > 0) 1aKcbdfe

1303 assert data['intervals'].shape[0] == len(metric) == len(passed) 1aKcbdfe

1304 return metric, passed 1aKcbdfe

1305 

1306 

1307# === Data integrity checks === 

1308 

1309def check_reward_volumes(data, **_): 

1310 """Check that the reward volume is between 1.5 and 3 uL for correct trials, 0 for incorrect. 

1311 

1312 Metric: 

1313 M = reward volume 

1314 

1315 Criterion: 

1316 1.5 <= M <= 3 if correct else M == 0 

1317 

1318 Units: 

1319 uL 

1320 

1321 :param data: dict of trial data with keys ('rewardVolume', 'correct', 'intervals') 

1322 """ 

1323 metric = data['rewardVolume'] 1aycbdfe

1324 correct = data['correct'] 1aycbdfe

1325 passed = np.zeros_like(metric, dtype=bool) 1aycbdfe

1326 # Check correct trials within correct range 

1327 passed[correct] = (1.5 <= metric[correct]) & (metric[correct] <= 3.) 1aycbdfe

1328 # Check incorrect trials are 0 

1329 passed[~correct] = metric[~correct] == 0 1aycbdfe

1330 assert data['intervals'].shape[0] == len(metric) == len(passed) 1aycbdfe

1331 return metric, passed 1aycbdfe

1332 
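# Illustrative sketch on three hypothetical trials:
# >>> data = {'intervals': np.zeros((3, 2)),
# ...         'rewardVolume': np.array([1.5, 0., 3.5]),
# ...         'correct': np.array([True, False, True])}
# >>> metric, passed = check_reward_volumes(data)
# >>> passed.tolist()  # 3.5 uL on a correct trial is out of range
# [True, True, False]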

1333 

1334def check_reward_volume_set(data, **_): 

1335 """Check that there is only two reward volumes within a session, one of which is 0. 

1336 

1337 Metric: 

1338 M = set(rewardVolume) 

1339 

1340 Criterion: 

1341 (0 < len(M) <= 2) and 0 in M 

1342 

1343 :param data: dict of trial data with keys ('rewardVolume') 

1344 """ 

1345 metric = data['rewardVolume'] 1aLcbdfe

1346 passed = 0 < len(set(metric)) <= 2 and 0. in metric 1aLcbdfe

1347 return metric, passed 1aLcbdfe

1348 

1349 

1350def check_wheel_integrity(data, re_encoding='X1', enc_res=None, **_): 

1351 """ 

1352 Check wheel position sampled at the expected resolution. 

1353 

1354 Check that the difference between wheel position samples is close to the encoder resolution 

1355 and that the wheel timestamps strictly increase. 

1356 

1357 Metric: 

1358 M = |diff(wheel_position)| 

1359 + (1 if diff(wheel_timestamps) <= 0 else 0) 

1360 

1361 Criterion: 

1362 M < 1.5 * encoder resolution (i.e. M ~= 0) 

1363 

1364 Units: 

1365 arbitrary (radians, sometimes + 1) 

1366 

1367 :param data: dict of wheel data with keys ('wheel_timestamps', 'wheel_position') 

1368 :param re_encoding: the encoding of the wheel data, X1, X2 or X4 

1369 :param enc_res: the rotary encoder resolution (default 1024 ticks per revolution) 

1370 

1371 Notes 

1372 ----- 

1373 - At high velocities some samples are missed due to the scanning frequency of the DAQ. 

1374 This checks for more than 1 missing sample in a row (i.e. the difference between samples >= 2) 

1375 

1376 """ 

1377 if isinstance(re_encoding, str): 1avcbdfe

1378 re_encoding = int(re_encoding[-1]) 1avcbdfe

1379 # The expected difference between samples in the extracted units 

1380 resolution = 1 / (enc_res or ephys_fpga.WHEEL_TICKS 1avcbdfe

1381 ) * np.pi * 2 * ephys_fpga.WHEEL_RADIUS_CM / re_encoding 

1382 # We expect the difference of neighbouring positions to be close to the resolution 

1383 pos_check = np.abs(np.diff(data['wheel_position'])) 1avcbdfe

1384 # Timestamps should be strictly increasing 

1385 ts_check = np.diff(data['wheel_timestamps']) <= 0. 1avcbdfe

1386 metric = pos_check + ts_check.astype(float) # all values should be close to zero 1avcbdfe

1387 passed = metric < 1.5 * resolution 1avcbdfe

1388 return metric, passed 1avcbdfe

1389 

1390 

1391# === Pre-stimulus checks === 

1392def check_stimulus_move_before_goCue(data, photodiode=None, **_): 

1393 """ 

1394 Check there are no stimulus events before the go cue tone. 

1395 

1396 Check that there are no visual stimulus change(s) between the start of the trial and the 

1397 go cue sound onset, except for stim on. 

1398 

1399 Metric: 

1400 M = number of visual stimulus change events between trial start and goCue_times 

1401 

1402 Criterion: 

1403 M == 1 

1404 

1405 Units: 

1406 -none-, integer 

1407 

1408 Parameters 

1409 ---------- 

1410 data : dict 

1411 Trial data with keys ('goCue_times', 'intervals', 'choice'). 

1412 photodiode : dict 

1413 The fronts from Bpod's BNC1 input or FPGA frame2ttl channel. 

1414 

1415 Returns 

1416 ------- 

1417 numpy.array 

1418 An array of metric values to threshold. 

1419 numpy.array 

1420 An array of boolean values, 1 per trial, where True means trial passes QC threshold. 

1421 

1422 Notes 

1423 ----- 

1424 - There should be exactly 1 stimulus change before goCue; stimulus onset. Even if the stimulus 

1425 contrast is 0, the sync square will still flip at stimulus onset, etc. 

1426 - If there are no goCue times (all are NaN), the status should be NOT_SET. 

1427 """ 

1428 if photodiode is None: 1acbdfe

1429 _log.warning('No photodiode TTL input in function call, returning None') 

1430 return None, None 

1431 photodiode_clean = ephys_fpga._clean_frame2ttl(photodiode) 1acbdfe

1432 s = photodiode_clean['times'] 1acbdfe

1433 s = s[~np.isnan(s)] # Remove NaNs 1acbdfe

1434 metric = np.array([]) 1acbdfe

1435 for i, c in zip(data['intervals'][:, 0], data['goCue_times']): 1acbdfe

1436 metric = np.append(metric, np.count_nonzero(s[s > i] < c)) 1acbdfe

1437 

1438 passed = (metric == 1).astype(float) 1acbdfe

1439 passed[np.isnan(data['goCue_times'])] = np.nan 1acbdfe

1440 assert data['intervals'].shape[0] == len(metric) == len(passed) 1acbdfe

1441 return metric, passed 1acbdfe

1442 

1443 

1444def check_audio_pre_trial(data, audio=None, **_): 

1445 """ 

1446 Check no audio stimuli before the go cue. 

1447 

1448 Check there are no audio outputs between the start of the trial and the go cue sound onset - 20 ms. 

1449 

1450 Metric: 

1451 M = sum(start_times < audio TTL < (goCue_times - 20ms)) 

1452 

1453 Criterion: 

1454 M == 0 

1455 

1456 Units: 

1457 -none-, integer 

1458 

1459 Parameters 

1460 ---------- 

1461 data : dict 

1462 Trial data with keys ('goCue_times', 'intervals'). 

1463 audio : dict 

1464 The fronts from Bpod's BNC2 input or FPGA audio sync channel. 

1465 

1466 Returns 

1467 ------- 

1468 numpy.array 

1469 An array of metric values to threshold. 

1470 numpy.array 

1471 An array of boolean values, 1 per trial, where True means trial passes QC threshold. 

1472 """ 

1473 if audio is None: 1awcbdfe

1474 _log.warning('No BNC2 input in function call, returning None') 

1475 return None, None 

1476 s = audio['times'][~np.isnan(audio['times'])] # Audio TTLs with NaNs removed 1awcbdfe

1477 metric = np.array([], dtype=np.int8) 1awcbdfe

1478 for i, c in zip(data['intervals'][:, 0], data['goCue_times']): 1awcbdfe

1479 metric = np.append(metric, sum(s[s > i] < (c - 0.02))) 1awcbdfe

1480 passed = metric == 0 1awcbdfe

1481 assert data['intervals'].shape[0] == len(metric) == len(passed) 1awcbdfe

1482 return metric, passed 1awcbdfe
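# Illustrative sketch: one hypothetical trial with a spurious audio TTL 100 ms after trial
# start, well before the go cue.
# >>> data = {'intervals': np.array([[0., 5.]]), 'goCue_times': np.array([1.0])}
# >>> audio = {'times': np.array([0.1, 1.0])}
# >>> metric, passed = check_audio_pre_trial(data, audio=audio)
# >>> metric.tolist(), passed.tolist()
# ([1], [False])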