Coverage for ibllib/io/extractors/ephys_fpga.py: 92%
560 statements
coverage.py v7.7.0, created at 2025-03-17 15:25 +0000
1"""Data extraction from raw FPGA output.
3The behaviour extraction happens in the following stages:
5 1. The NI DAQ events are extracted into a map of event times and TTL polarities.
6 2. The Bpod trial events are extracted from the raw Bpod data, depending on the task protocol.
7 3. As protocols may be chained together within a given recording, the period of a given task
8 protocol is determined using the 'spacer' DAQ signal (see `get_protocol_period`).
9 4. Physical behaviour events such as stim on and reward time are separated out by TTL length or
10 sequence within the trial.
11 5. The Bpod clock is sync'd with the FPGA using one of the extracted trial events.
12 6. The Bpod software events are then converted to FPGA time.
14Examples
15--------
16For simple extraction, use the FPGATrials class:
18>>> extractor = FpgaTrials(session_path)
19>>> trials, _ = extractor.extract(update=False, save=False)
21Notes
22-----
23Sync extraction in this module only supports FPGA data acquired with an NI DAQ as part of a
24Neuropixels recording system, however a sync and channel map extracted from a different DAQ format
25can be passed to the FpgaTrials class.
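
A sync and channel map can be passed in directly. A hedged sketch, assuming `sync` and `chmap`
are dicts in the format described in `FpgaTrials.load_sync`:

>>> extractor = FpgaTrials(session_path)
>>> trials, _ = extractor.extract(save=False, sync=sync, chmap=chmap)  # doctest: +SKIP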

See Also
--------
For dynamic pipeline sessions it is best to call the extractor via the BehaviorTask class.

TODO notes on subclassing various methods of FpgaTrials for custom hardware.
"""
import logging
from itertools import cycle
from pathlib import Path
import uuid
import re
from functools import partial

import matplotlib.pyplot as plt
from matplotlib.colors import TABLEAU_COLORS
import numpy as np
from packaging import version

import spikeglx
import ibldsp.utils
import one.alf.io as alfio
from one.alf.path import filename_parts
from iblutil.util import Bunch
from iblutil.spacer import Spacer

import ibllib.exceptions as err
from ibllib.io import raw_data_loaders as raw, session_params
from ibllib.io.extractors.bpod_trials import get_bpod_extractor
import ibllib.io.extractors.base as extractors_base
from ibllib.io.extractors.training_wheel import extract_wheel_moves
from ibllib import plots
from ibllib.io.extractors.default_channel_maps import DEFAULT_MAPS

_logger = logging.getLogger(__name__)

SYNC_BATCH_SIZE_SECS = 100
"""int: Number of seconds of data to read at once from the binary file when extracting the sync."""

WHEEL_RADIUS_CM = 1  # stay in radians
"""float: The radius of the wheel used in the task. A value of 1 ensures units remain in radians."""

WHEEL_TICKS = 1024
"""int: The number of encoder pulses per channel for one complete rotation."""

BPOD_FPGA_DRIFT_THRESHOLD_PPM = 150
"""int: Logs a warning if Bpod to FPGA clock drift is higher than this value."""

CHMAPS = {'3A':
          {'ap':
           {'left_camera': 2,
            'right_camera': 3,
            'body_camera': 4,
            'bpod': 7,
            'frame2ttl': 12,
            'rotary_encoder_0': 13,
            'rotary_encoder_1': 14,
            'audio': 15
            }
           },
          '3B':
          {'nidq':
           {'left_camera': 0,
            'right_camera': 1,
            'body_camera': 2,
            'imec_sync': 3,
            'frame2ttl': 4,
            'rotary_encoder_0': 5,
            'rotary_encoder_1': 6,
            'audio': 7,
            'bpod': 16,
            'laser': 17,
            'laser_ttl': 18},
           'ap':
           {'imec_sync': 6}
           },
          }
"""dict: The default channel indices corresponding to various devices for different recording systems."""


def data_for_keys(keys, data):
    """Check keys exist in 'data' dict and contain values other than None."""
    return data is not None and all(k in data and data.get(k, None) is not None for k in keys)


def get_ibl_sync_map(ef, version):
    """
    Gets the default channel map for the version/binary file type combination.

    :param ef: ibllib.io.spikeglx.glob_ephys_file dictionary with field 'ap' or 'nidq'
    :param version: neuropixel version string, e.g. '3A' or '3B'
    :return: channel map dictionary
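
    A hedged usage sketch (paths are hypothetical); when no sync map file exists on disk beside
    the binary, the default map for the given version is returned:

    >>> from pathlib import Path
    >>> ef = {'path': Path('raw_ephys_data'), 'nidq': Path('raw_ephys_data/sync.nidq.bin')}
    >>> chmap = get_ibl_sync_map(ef, '3B')  # doctest: +SKIP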
116 """
117 # Determine default channel map
118 if version == '3A': 1ZIJEFfpqrMNOCDPtGHYRaecgblmnkjdh
119 default_chmap = CHMAPS['3A']['ap'] 1ZYaeclmnd
120 elif version == '3B': 1ZIJEFfpqrMNOCDPtGHRagbkjdh
121 if ef.get('nidq', None): 1ZIJEFfpqrMNOCDPtGHRagbkjdh
122 default_chmap = CHMAPS['3B']['nidq'] 1ZIJEFfpqrMNOCDPtGHRagbkjdh
123 elif ef.get('ap', None): 1ZpqrRkj
124 default_chmap = CHMAPS['3B']['ap'] 1ZpqrRkj
125 # Try to load channel map from file
126 chmap = spikeglx.get_sync_map(ef['path']) 1ZIJEFfpqrMNOCDPtGHYRaecgblmnkjdh
127 # If chmap provided but not with all keys, fill up with default values
128 if not chmap: 1ZIJEFfpqrMNOCDPtGHYRaecgblmnkjdh
129 return default_chmap 1ZIJEFpqrGHYRacgkjh
130 else:
131 if data_for_keys(default_chmap.keys(), chmap): 1fMNOCDPtYReblmnkd
132 return chmap 1ftYReblmnkd
133 else:
134 _logger.warning("Keys missing from provided channel map, " 1MNOCDPYlmd
135 "setting missing keys from default channel map")
136 return {**default_chmap, **chmap} 1MNOCDPYlmd


def _sync_to_alf(raw_ephys_apfile, output_path=None, save=False, parts=''):
    """
    Extracts sync.times, sync.channels and sync.polarities from a binary ephys dataset.

    :param raw_ephys_apfile: bin file containing ephys data, or a spikeglx.Reader object
    :param output_path: output directory
    :param save: bool, write to disk only if True
    :param parts: string or list of strings that will be appended to the filename before extension
    :return: Bunch of sync arrays; if `save` is True, also the list of output files
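
    A hedged usage sketch (the binary file path is hypothetical):

    >>> sync, out_files = _sync_to_alf('raw_ephys_data/ephysData.nidq.bin', save=True)  # doctest: +SKIP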
148 """
149 # handles input argument: support ibllib.io.spikeglx.Reader, str and pathlib.Path
150 if isinstance(raw_ephys_apfile, spikeglx.Reader): 1wxypqrzABj
151 sr = raw_ephys_apfile 1wxypqrzABj
152 else:
153 raw_ephys_apfile = Path(raw_ephys_apfile)
154 sr = spikeglx.Reader(raw_ephys_apfile)
155 if not (opened := sr.is_open): 1wxypqrzABj
156 sr.open()
157 # if no output, need a temp folder to swap for big files
158 if not output_path: 1wxypqrzABj
159 output_path = raw_ephys_apfile.parent
160 file_ftcp = Path(output_path).joinpath(f'fronts_times_channel_polarity{uuid.uuid4()}.bin') 1wxypqrzABj
162 # loop over chunks of the raw ephys file
163 wg = ibldsp.utils.WindowGenerator(sr.ns, int(SYNC_BATCH_SIZE_SECS * sr.fs), overlap=1) 1wxypqrzABj
164 fid_ftcp = open(file_ftcp, 'wb') 1wxypqrzABj
165 for sl in wg.slice: 1wxypqrzABj
166 ss = sr.read_sync(sl) 1wxypqrzABj
167 ind, fronts = ibldsp.utils.fronts(ss, axis=0) 1wxypqrzABj
168 # a = sr.read_sync_analog(sl)
169 sav = np.c_[(ind[0, :] + sl.start) / sr.fs, ind[1, :], fronts.astype(np.double)] 1wxypqrzABj
170 sav.tofile(fid_ftcp) 1wxypqrzABj
171 # close temp file, read from it and delete
172 fid_ftcp.close() 1wxypqrzABj
173 tim_chan_pol = np.fromfile(str(file_ftcp)) 1wxypqrzABj
174 tim_chan_pol = tim_chan_pol.reshape((int(tim_chan_pol.size / 3), 3)) 1wxypqrzABj
175 file_ftcp.unlink() 1wxypqrzABj
176 sync = {'times': tim_chan_pol[:, 0], 1wxypqrzABj
177 'channels': tim_chan_pol[:, 1],
178 'polarities': tim_chan_pol[:, 2]}
179 # If opened Reader was passed into function, leave open
180 if not opened: 1wxypqrzABj
181 sr.close()
182 if save: 1wxypqrzABj
183 out_files = alfio.save_object_npy(output_path, sync, 'sync', 1wxypqrzABj
184 namespace='spikeglx', parts=parts)
185 return Bunch(sync), out_files 1wxypqrzABj
186 else:
187 return Bunch(sync)


def _rotary_encoder_positions_from_fronts(ta, pa, tb, pb, ticks=WHEEL_TICKS, radius=WHEEL_RADIUS_CM, coding='x4'):
    """
    Extracts the rotary encoder absolute position as a function of time from the fronts detected
    on the 2 channels. The output is in units of the radius parameter, by default radians.
    Coding options are detailed here: http://www.ni.com/tutorial/7109/pt/
    Here the output is clockwise from the subject's perspective.

    :param ta: time of fronts on channel A
    :param pa: polarity of fronts on channel A
    :param tb: time of fronts on channel B
    :param pb: polarity of fronts on channel B
    :param ticks: number of ticks corresponding to a full revolution (1024 for IBL rotary encoder)
    :param radius: radius of the wheel. Defaults to 1 for an output in radians
    :param coding: x1, x2 or x4 coding (IBL default is x4)
    :return: indices vector (ta) and position vector
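
    A minimal sketch with hypothetical, evenly interleaved fronts (channel B lagging channel A
    by a quarter period) decoded with the default x4 coding:

    >>> import numpy as np
    >>> ta, pa = np.array([0.00, 0.02, 0.04, 0.06]), np.array([1, -1, 1, -1])
    >>> tb, pb = np.array([0.01, 0.03, 0.05, 0.07]), np.array([1, -1, 1, -1])
    >>> t, pos = _rotary_encoder_positions_from_fronts(ta, pa, tb, pb, ticks=1024, coding='x4')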
205 """
206 if coding == 'x1': 16V70Kfaecgblmnkjdh
207 ia = np.searchsorted(tb, ta[pa == 1]) 1VK
208 ia = ia[ia < ta.size] 1VK
209 ia = ia[pa[ia] == 1] 1VK
210 ib = np.searchsorted(ta, tb[pb == 1]) 1VK
211 ib = ib[ib < tb.size] 1VK
212 ib = ib[pb[ib] == 1] 1VK
213 t = np.r_[ta[ia], tb[ib]] 1VK
214 p = np.r_[ia * 0 + 1, ib * 0 - 1] 1VK
215 ordre = np.argsort(t) 1VK
216 t = t[ordre] 1VK
217 p = p[ordre] 1VK
218 p = np.cumsum(p) / ticks * np.pi * 2 * radius 1VK
219 return t, p 1VK
220 elif coding == 'x2': 1670Kfaecgblmnkjdh
221 p = pb[np.searchsorted(tb, ta) - 1] * pa 167K
222 p = - np.cumsum(p) / ticks * np.pi * 2 * radius / 2 167K
223 return ta, p 167K
224 elif coding == 'x4': 10Kfaecgblmnkjdh
225 p = np.r_[pb[np.searchsorted(tb, ta) - 1] * pa, -pa[np.searchsorted(ta, tb) - 1] * pb] 10Kfaecgblmnkjdh
226 t = np.r_[ta, tb] 10Kfaecgblmnkjdh
227 ordre = np.argsort(t) 10Kfaecgblmnkjdh
228 t = t[ordre] 10Kfaecgblmnkjdh
229 p = p[ordre] 10Kfaecgblmnkjdh
230 p = - np.cumsum(p) / ticks * np.pi * 2 * radius / 4 10Kfaecgblmnkjdh
231 return t, p 10Kfaecgblmnkjdh


def _assign_events_to_trial(t_trial_start, t_event, take='last', t_trial_end=None):
    """
    Assign events to a trial given trial start times and event times.

    Trials without an event result in a NaN value in the output time vector.
    The output has the same size as `t_trial_start` and is ready to output to ALF.

    Parameters
    ----------
    t_trial_start : numpy.array
        An array of start times, used as bin edges for assigning values from `t_event`.
    t_event : numpy.array
        An array of event times to assign to trials.
    take : str {'first', 'last'}, int
        'first' takes the first event > t_trial_start; 'last' takes the last event < the next
        t_trial_start; an int defines the index to take for events within trial bounds. The index
        may be negative.
    t_trial_end : numpy.array
        Optional array of end times, used as bin edges for assigning values from `t_event`.

    Returns
    -------
    numpy.array
        An array the length of `t_trial_start` containing values from `t_event`. Unassigned values
        are replaced with np.nan.

    See Also
    --------
    FpgaTrials._assign_events - Assign trial events based on TTL length.
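
    Examples
    --------
    A minimal sketch with hypothetical times; the second trial has no event and therefore gets NaN:

    >>> import numpy as np
    >>> t_trial_start = np.array([0., 10., 20.])
    >>> t_event = np.array([1.5, 2.5, 21.])
    >>> first = _assign_events_to_trial(t_trial_start, t_event, take='first')  # [1.5, nan, 21.]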
263 """
264 # make sure the events are sorted
265 try: 1Lfiaecgbdh
266 assert np.all(np.diff(t_trial_start) >= 0) 1Lfiaecgbdh
267 except AssertionError: 1L
268 raise ValueError('Trial starts vector not sorted') 1L
269 try: 1Lfiaecgbdh
270 assert np.all(np.diff(t_event) >= 0) 1Lfiaecgbdh
271 except AssertionError: 1L
272 raise ValueError('Events vector is not sorted') 1L
274 # remove events that happened before the first trial start
275 remove = t_event < t_trial_start[0] 1Lfiaecgbdh
276 if t_trial_end is not None: 1Lfiaecgbdh
277 if not np.all(np.diff(t_trial_end) >= 0): 1fiaecgbdh
278 raise ValueError('Trial end vector not sorted')
279 if not np.all(t_trial_end[:-1] < t_trial_start[1:]): 1fiaecgbdh
280 raise ValueError('Trial end times must not overlap with trial start times')
281 # remove events between end and next start, and after last end
282 remove |= t_event > t_trial_end[-1] 1fiaecgbdh
283 for e, s in zip(t_trial_end[:-1], t_trial_start[1:]): 1fiaecgbdh
284 remove |= np.logical_and(s > t_event, t_event >= e) 1fiaecgbdh
285 t_event = t_event[~remove] 1Lfiaecgbdh
286 ind = np.searchsorted(t_trial_start, t_event) - 1 1Lfiaecgbdh
287 t_event_nans = np.zeros_like(t_trial_start) * np.nan 1Lfiaecgbdh
288 # select first or last element matching each trial start
289 if take == 'last': 1Lfiaecgbdh
290 iall, iu = np.unique(np.flip(ind), return_index=True) 1Lfiaecgbdh
291 t_event_nans[iall] = t_event[- (iu - ind.size + 1)] 1Lfiaecgbdh
292 elif take == 'first': 1Lfiaecgbdh
293 iall, iu = np.unique(ind, return_index=True) 1Lfiaecgbdh
294 t_event_nans[iall] = t_event[iu] 1Lfiaecgbdh
295 else: # if the index is arbitrary, needs to be numeric (could be negative if from the end)
296 iall = np.unique(ind) 1La
297 minsize = take + 1 if take >= 0 else - take 1La
298 # for each trial, take the take nth element if there are enough values in trial
299 for iu in iall: 1La
300 match = t_event[iu == ind] 1La
301 if len(match) >= minsize: 1La
302 t_event_nans[iu] = match[take] 1La
303 return t_event_nans 1Lfiaecgbdh


def get_sync_fronts(sync, channel_nb, tmin=None, tmax=None):
    """
    Return the sync front polarities and times for a given channel.

    Parameters
    ----------
    sync : dict
        'polarities' of fronts detected on sync trace for all 16 channels and their 'times'.
    channel_nb : int
        The integer corresponding to the desired sync channel.
    tmin : float
        The minimum time from which to extract the sync pulses.
    tmax : float
        The maximum time up to which we extract the sync pulses.

    Returns
    -------
    Bunch
        Channel times and polarities.
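
    Examples
    --------
    A minimal sketch with a hypothetical three-front sync dict, keeping channel 3 up to 1.5 s:

    >>> import numpy as np
    >>> sync = {'times': np.r_[0., 1., 2.], 'polarities': np.r_[1, -1, 1], 'channels': np.r_[3, 3, 4]}
    >>> fronts = get_sync_fronts(sync, 3, tmax=1.5)  # times [0., 1.], polarities [1, -1]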
325 """
326 selection = sync['channels'] == channel_nb 1vusIJEFfpqriMQSTUNOCDPtGHRaecgblmnkjdh
327 selection = np.logical_and(selection, sync['times'] <= tmax) if tmax else selection 1vusIJEFfpqriMQSTUNOCDPtGHRaecgblmnkjdh
328 selection = np.logical_and(selection, sync['times'] >= tmin) if tmin else selection 1vusIJEFfpqriMQSTUNOCDPtGHRaecgblmnkjdh
329 return Bunch({'times': sync['times'][selection], 1vusIJEFfpqriMQSTUNOCDPtGHRaecgblmnkjdh
330 'polarities': sync['polarities'][selection]})


def _clean_audio(audio, display=False):
    """
    On one rig the 150 Hz camera output was wired onto the soundcard. The effect is to get 150 Hz
    periodic square pulses, 2 ms up and 4.666 ms down. When this happens we remove all of the
    intermediate pulses to repair the audio trace.

    Here is some helper code:
        dd = np.diff(audio['times'])
        1 / np.median(dd[::2])  # 2 ms up
        1 / np.median(dd[1::2])  # 4.666 ms down
        1 / (np.median(dd[::2]) + np.median(dd[1::2]))  # both sum to 150 Hz

    This only runs on sessions where the bug is detected and leaves others untouched.
    """
    DISCARD_THRESHOLD = 0.01
    average_150_hz = np.mean(1 / np.diff(audio['times'][audio['polarities'] == 1]) > 140)
    naudio = audio['times'].size
    if average_150_hz > 0.7 and naudio > 100:
        _logger.warning('Soundcard signal on FPGA seems to have been mixed with 150Hz camera')
        keep_ind = np.r_[np.diff(audio['times']) > DISCARD_THRESHOLD, False]
        keep_ind = np.logical_and(keep_ind, audio['polarities'] == -1)
        keep_ind = np.where(keep_ind)[0]
        keep_ind = np.sort(np.r_[0, keep_ind, keep_ind + 1, naudio - 1])

        if display:  # pragma: no cover
            from ibllib.plots import squares
            squares(audio['times'], audio['polarities'], ax=None, yrange=[-1, 1])
            squares(audio['times'][keep_ind], audio['polarities'][keep_ind], yrange=[-1, 1])
        audio = {'times': audio['times'][keep_ind],
                 'polarities': audio['polarities'][keep_ind]}
    return audio


def _clean_frame2ttl(frame2ttl, threshold=0.01, display=False):
    """
    Clean the frame2ttl events.

    Frame2ttl calibration can be unstable and the fronts may flicker at an unrealistic
    pace. This removes consecutive frame2ttl pulses happening too fast, i.e. within `threshold`
    seconds of each other.

    Parameters
    ----------
    frame2ttl : dict
        A dictionary of frame2TTL events, with keys {'times', 'polarities'}.
    threshold : float
        Consecutive pulses occurring within this many seconds of each other are removed.
    display : bool
        If true, plots the input TTLs and the cleaned output.

    Returns
    -------
    dict
        A dictionary of frame2TTL events, with keys {'times', 'polarities'}, with the flickering
        pulses removed.
    """
    dt = np.diff(frame2ttl['times'])
    iko = np.where(np.logical_and(dt < threshold, frame2ttl['polarities'][:-1] == -1))[0]
    iko = np.unique(np.r_[iko, iko + 1])
    frame2ttl_ = {'times': np.delete(frame2ttl['times'], iko),
                  'polarities': np.delete(frame2ttl['polarities'], iko)}
    if iko.size > (0.1 * frame2ttl['times'].size):
        _logger.warning(f'{iko.size} ({iko.size / frame2ttl["times"].size:.2%}) '
                        f'frame to TTL polarity switches below {threshold} secs')
    if display:  # pragma: no cover
        fig, (ax0, ax1) = plt.subplots(2, sharex=True)
        plots.squares(frame2ttl['times'] * 1000, frame2ttl['polarities'], yrange=[0.1, 0.9], ax=ax0)
        plots.squares(frame2ttl_['times'] * 1000, frame2ttl_['polarities'], yrange=[1.1, 1.9], ax=ax1)
        import seaborn as sns
        sns.displot(dt[dt < 0.05], binwidth=0.0005)

    return frame2ttl_


def extract_wheel_sync(sync, chmap=None, tmin=None, tmax=None):
    """
    Extract wheel positions and times from the sync fronts dictionary.
    Output position is in radians, mathematical convention.

    Parameters
    ----------
    sync : dict
        'polarities' of fronts detected on sync trace for all 16 chans and their 'times'
    chmap : dict
        Map of channel names and their corresponding index. Defaults to the constant map.
    tmin : float
        The minimum time from which to extract the sync pulses.
    tmax : float
        The maximum time up to which we extract the sync pulses.

    Returns
    -------
    numpy.array
        Wheel timestamps in seconds.
    numpy.array
        Wheel positions in radians.
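
    Examples
    --------
    A hedged sketch, assuming `sync` and `chmap` have already been loaded (e.g. via
    `get_sync_and_chn_map` or `get_main_probe_sync`):

    >>> re_ts, re_pos = extract_wheel_sync(sync, chmap=chmap, tmin=10, tmax=600)  # doctest: +SKIP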
425 """
426 # Assume two separate edge count channels
427 assert chmap.keys() >= {'rotary_encoder_0', 'rotary_encoder_1'} 1faecgblmnkjdh
428 channela = get_sync_fronts(sync, chmap['rotary_encoder_0'], tmin=tmin, tmax=tmax) 1faecgblmnkjdh
429 channelb = get_sync_fronts(sync, chmap['rotary_encoder_1'], tmin=tmin, tmax=tmax) 1faecgblmnkjdh
430 re_ts, re_pos = _rotary_encoder_positions_from_fronts( 1faecgblmnkjdh
431 channela['times'], channela['polarities'], channelb['times'], channelb['polarities'],
432 ticks=WHEEL_TICKS, radius=WHEEL_RADIUS_CM, coding='x4')
433 return re_ts, re_pos 1faecgblmnkjdh


def extract_sync(session_path, overwrite=False, ephys_files=None, namespace='spikeglx'):
    """
    Reads the ephys binary file(s) and extracts the sync within the binary file folder.
    Assumes ephys data is within a `raw_ephys_data` folder.

    :param session_path: '/path/to/subject/yyyy-mm-dd/001'
    :param overwrite: Bool on re-extraction, forces overwrite instead of loading existing files
    :param ephys_files: list of ephys file dictionaries (as returned by spikeglx.glob_ephys_files);
     if None, the session path is globbed
    :param namespace: filename namespace for the saved sync object, defaults to 'spikeglx'
    :return: list of sync dictionaries
    """
    session_path = Path(session_path)
    if not ephys_files:
        ephys_files = spikeglx.glob_ephys_files(session_path)
    syncs = []
    outputs = []
    for efi in ephys_files:
        bin_file = efi.get('ap', efi.get('nidq', None))
        if not bin_file:
            continue
        alfname = dict(object='sync', namespace=namespace)
        if efi.label:
            alfname['extra'] = efi.label
        file_exists = alfio.exists(bin_file.parent, **alfname)
        if not overwrite and file_exists:
            _logger.warning(f'Skipping raw sync: SGLX sync found for {efi.label}!')
            sync = alfio.load_object(bin_file.parent, **alfname)
            out_files, _ = alfio._ls(bin_file.parent, **alfname)
        else:
            sr = spikeglx.Reader(bin_file)
            sync, out_files = _sync_to_alf(sr, bin_file.parent, save=True, parts=efi.label)
            sr.close()
        outputs.extend(out_files)
        syncs.extend([sync])

    return syncs, outputs


def _get_all_probes_sync(session_path, bin_exists=True):
    # round-up of all bin ephys files in the session, infer revision and get sync map
    ephys_files = spikeglx.glob_ephys_files(session_path, bin_exists=bin_exists)
    version = spikeglx.get_neuropixel_version_from_files(ephys_files)
    # attach the sync information to each binary file found
    for ef in ephys_files:
        ef['sync'] = alfio.load_object(ef.path, 'sync', namespace='spikeglx', short_keys=True)
        ef['sync_map'] = get_ibl_sync_map(ef, version)
    return ephys_files


def get_wheel_positions(sync, chmap, tmin=None, tmax=None):
    """
    Gets the wheel position from synchronisation pulses.

    Parameters
    ----------
    sync : dict
        A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses and
        the corresponding channel numbers.
    chmap : dict[str, int]
        A map of channel names and their corresponding indices.
    tmin : float
        The minimum time from which to extract the sync pulses.
    tmax : float
        The maximum time up to which we extract the sync pulses.

    Returns
    -------
    Bunch
        A dictionary with keys ('timestamps', 'position'), containing the wheel event timestamps and
        position in radians.
    Bunch
        A dictionary of detected movement times with keys ('intervals', 'peakAmplitude', 'peakVelocity_times').
    """
    ts, pos = extract_wheel_sync(sync=sync, chmap=chmap, tmin=tmin, tmax=tmax)
    moves = Bunch(extract_wheel_moves(ts, pos))
    wheel = Bunch({'timestamps': ts, 'position': pos})
    return wheel, moves


def get_main_probe_sync(session_path, bin_exists=False):
    """
    From a 3A or 3B multiprobe session, returns the main probe (3A) or nidq sync pulses
    with the attached channel map (default chmap if none).

    Parameters
    ----------
    session_path : str, pathlib.Path
        The absolute session path, i.e. '/path/to/subject/yyyy-mm-dd/nnn'.
    bin_exists : bool
        Whether there is a .bin file present.

    Returns
    -------
    one.alf.io.AlfBunch
        A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses and
        the corresponding channel numbers.
    dict
        A map of channel names and their corresponding indices.
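
    Examples
    --------
    A hedged usage sketch (hypothetical session path):

    >>> sync, chmap = get_main_probe_sync('/path/to/subject/yyyy-mm-dd/001')  # doctest: +SKIP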
532 """
533 ephys_files = _get_all_probes_sync(session_path, bin_exists=bin_exists) 1IJCDaecWlmnkjd
534 if not ephys_files: 1IJCDaecWlmnkjd
535 raise FileNotFoundError(f"No ephys files found in {session_path}") 1W
536 version = spikeglx.get_neuropixel_version_from_files(ephys_files) 1IJCDaeclmnkjd
537 if version == '3A': 1IJCDaeclmnkjd
538 # the sync master is the probe with the most sync pulses
539 sync_box_ind = np.argmax([ef.sync.times.size for ef in ephys_files]) 1aeclmnd
540 elif version == '3B': 1IJCDkj
541 # the sync master is the nidq breakout box
542 sync_box_ind = np.argmax([1 if ef.get('nidq') else 0 for ef in ephys_files]) 1IJCDkj
543 sync = ephys_files[sync_box_ind].sync 1IJCDaeclmnkjd
544 sync_chmap = ephys_files[sync_box_ind].sync_map 1IJCDaeclmnkjd
545 return sync, sync_chmap 1IJCDaeclmnkjd


def get_protocol_period(session_path, protocol_number, bpod_sync, exclude_empty_periods=True):
    """
    Return the start and end times of the protocol with the given number.

    Note that the start time is the start of the spacer pulses and the end time is either None
    if the protocol is the final one, or the start of the next spacer.

    Parameters
    ----------
    session_path : str, pathlib.Path
        The absolute session path, i.e. '/path/to/subject/yyyy-mm-dd/nnn'.
    protocol_number : int
        The order that the protocol was run in, counted from 0.
    bpod_sync : dict
        The sync times and polarities for Bpod BNC1.
    exclude_empty_periods : bool
        When true, spacers are ignored if no bpod pulses are detected between periods.

    Returns
    -------
    float
        The time of the detected spacer for the protocol number.
    float, None
        The time of the next detected spacer or None if this is the last protocol run.
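
    Examples
    --------
    A hedged sketch, assuming `sync` and `chmap` have already been loaded; returns the period of
    the second protocol run:

    >>> bpod_sync = get_sync_fronts(sync, chmap['bpod'])  # doctest: +SKIP
    >>> tmin, tmax = get_protocol_period(session_path, 1, bpod_sync)  # doctest: +SKIP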
572 """
573 # The spacers are TTLs generated by Bpod at the start of each protocol
574 sp = Spacer() 1t
575 spacer_times = sp.find_spacers_from_fronts(bpod_sync) 1t
576 if exclude_empty_periods: 1t
577 # Drop dud protocol spacers (those without any bpod pulses after the spacer)
578 spacer_length = len(sp.generate_template(fs=1000)) / 1000 1t
579 periods = np.c_[spacer_times + spacer_length, np.r_[spacer_times[1:], np.inf]] 1t
580 valid = [np.any((bpod_sync['times'] > pp[0]) & (bpod_sync['times'] < pp[1])) for pp in periods] 1t
581 spacer_times = spacer_times[valid] 1t
582 # Ensure that the number of detected spacers matched the number of expected tasks
583 if acquisition_description := session_params.read_params(session_path): 1t
584 n_tasks = len(acquisition_description.get('tasks', [])) 1t
585 assert len(spacer_times) >= protocol_number, (f'expected {n_tasks} spacers, found only {len(spacer_times)} - ' 1t
586 f'can not return protocol number {protocol_number}.')
587 assert n_tasks > protocol_number >= 0, f'protocol number must be between 0 and {n_tasks}' 1t
588 else:
589 assert protocol_number < len(spacer_times)
590 start = spacer_times[int(protocol_number)] 1t
591 end = None if len(spacer_times) - 1 == protocol_number else spacer_times[int(protocol_number + 1)] 1t
592 return start, end 1t


class FpgaTrials(extractors_base.BaseExtractor):
    save_names = ('_ibl_trials.goCueTrigger_times.npy', '_ibl_trials.stimOnTrigger_times.npy',
                  '_ibl_trials.stimOffTrigger_times.npy', None, None, None, None, None,
                  '_ibl_trials.stimOff_times.npy', None, None, None, '_ibl_trials.quiescencePeriod.npy',
                  '_ibl_trials.table.pqt', '_ibl_wheel.timestamps.npy',
                  '_ibl_wheel.position.npy', '_ibl_wheelMoves.intervals.npy',
                  '_ibl_wheelMoves.peakAmplitude.npy', None)
    var_names = ('goCueTrigger_times', 'stimOnTrigger_times',
                 'stimOffTrigger_times', 'stimFreezeTrigger_times', 'errorCueTrigger_times',
                 'errorCue_times', 'itiIn_times', 'stimFreeze_times', 'stimOff_times',
                 'valveOpen_times', 'phase', 'position', 'quiescence', 'table',
                 'wheel_timestamps', 'wheel_position',
                 'wheelMoves_intervals', 'wheelMoves_peakAmplitude', 'wheelMoves_peakVelocity_times')

    bpod_rsync_fields = ('intervals', 'response_times', 'goCueTrigger_times',
                         'stimOnTrigger_times', 'stimOffTrigger_times',
                         'stimFreezeTrigger_times', 'errorCueTrigger_times')
    """tuple of str: Fields from Bpod extractor that we want to re-sync to FPGA."""

    bpod_fields = ('feedbackType', 'choice', 'rewardVolume', 'contrastLeft', 'contrastRight',
                   'probabilityLeft', 'phase', 'position', 'quiescence')
    """tuple of str: Fields from bpod extractor that we want to save."""

    sync_field = 'intervals_0'  # trial start events
    """str: The trial event to synchronize (must be present in extracted trials)."""

    bpod = None
    """dict of numpy.array: The Bpod out TTLs recorded on the DAQ. Used in the QC viewer plot."""

    def __init__(self, *args, bpod_trials=None, bpod_extractor=None, **kwargs):
        """An extractor for ephysChoiceWorld trials data, in FPGA time.

        This class may be subclassed to handle moderate variations in hardware and task protocol;
        however there is some flexibility without subclassing, as the `bpod_rsync_fields`,
        `bpod_fields` and `sync_field` attributes may be overridden for a given protocol.

        Parameters
        ----------
        bpod_trials : dict
            An optional dictionary of previously extracted Bpod trials data.
        bpod_extractor : ibllib.io.extractors.base.BaseBpodTrialsExtractor
            An optional Bpod trials extractor, used to determine the trial variable names to extract.
        """
        super().__init__(*args, **kwargs)
        self.bpod2fpga = None
        self.bpod_trials = bpod_trials
        self.frame2ttl = self.audio = self.bpod = self.settings = None
        if bpod_extractor:
            self.bpod_extractor = bpod_extractor
            self._update_var_names()

    def _update_var_names(self, bpod_fields=None, bpod_rsync_fields=None):
        """
        Updates this object's attributes based on the Bpod trials extractor.

        Fields updated: bpod_fields, bpod_rsync_fields, save_names, and var_names.

        Parameters
        ----------
        bpod_fields : tuple
            A set of Bpod trials fields to keep.
        bpod_rsync_fields : tuple
            A set of Bpod trials fields to sync to the DAQ times.
        """
        if self.bpod_extractor:
            for var_name, save_name in zip(self.bpod_extractor.var_names, self.bpod_extractor.save_names):
                if var_name not in self.var_names:
                    self.var_names += (var_name,)
                    self.save_names += (save_name,)

            # self.var_names = self.bpod_extractor.var_names
            # self.save_names = self.bpod_extractor.save_names
            self.settings = self.bpod_extractor.settings  # This is used by the TaskQC
            self.bpod_rsync_fields = bpod_rsync_fields
            if self.bpod_rsync_fields is None:
                self.bpod_rsync_fields = tuple(self._time_fields(self.bpod_extractor.var_names))
                if 'table' in self.bpod_extractor.var_names:
                    if not self.bpod_trials:
                        self.bpod_trials = self.bpod_extractor.extract(save=False)
                    table_keys = alfio.AlfBunch.from_df(self.bpod_trials['table']).keys()
                    self.bpod_rsync_fields += tuple(self._time_fields(table_keys))
        elif bpod_rsync_fields:
            self.bpod_rsync_fields = bpod_rsync_fields
        excluded = (*self.bpod_rsync_fields, 'table')
        if bpod_fields:
            assert not set(self.bpod_fields).intersection(excluded), 'bpod_fields must not also be bpod_rsync_fields'
            self.bpod_fields = bpod_fields
        elif self.bpod_extractor:
            self.bpod_fields = tuple(x for x in self.bpod_extractor.var_names if x not in excluded)
            if 'table' in self.bpod_extractor.var_names:
                if not self.bpod_trials:
                    self.bpod_trials = self.bpod_extractor.extract(save=False)
                table_keys = alfio.AlfBunch.from_df(self.bpod_trials['table']).keys()
                self.bpod_fields += tuple([x for x in table_keys if x not in excluded])

    @staticmethod
    def _time_fields(trials_attr) -> set:
        """
        Iterates over Bpod trials attributes returning those that correspond to times for syncing.

        Parameters
        ----------
        trials_attr : iterable of str
            The Bpod field names.

        Returns
        -------
        set
            The field names that contain timestamps.
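
        Examples
        --------
        A minimal sketch with hypothetical field names; only the time-like names are returned:

        >>> fields = FpgaTrials._time_fields(['feedback_times', 'choice', 'intervals'])
        >>> sorted(fields)
        ['feedback_times', 'intervals']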
696 """
697 FIELDS = ('times', 'timestamps', 'intervals') 1!fiaecgbdh
698 pattern = re.compile(fr'^[_\w]*({"|".join(FIELDS)})[_\w]*$') 1!fiaecgbdh
699 return set(filter(pattern.match, trials_attr)) 1!fiaecgbdh

    def load_sync(self, sync_collection='raw_ephys_data', **kwargs):
        """Load the DAQ sync and channel map data.

        This method may be subclassed for novel DAQ systems. The sync must contain the following
        keys: 'times' - an array of timestamps in seconds; 'polarities' - an array of {-1, 1}
        corresponding to TTL LOW and TTL HIGH, respectively; 'channels' - an array of ints
        corresponding to channel number.

        Parameters
        ----------
        sync_collection : str
            The session subdirectory where the sync data are located.
        kwargs
            Optional arguments used by subclass methods.

        Returns
        -------
        one.alf.io.AlfBunch
            A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses
            and the corresponding channel numbers.
        dict
            A map of channel names and their corresponding indices.
        """
        return get_sync_and_chn_map(self.session_path, sync_collection)

    def _extract(self, sync=None, chmap=None, sync_collection='raw_ephys_data',
                 task_collection='raw_behavior_data', **kwargs) -> dict:
        """Extracts ephys trials by combining Bpod and FPGA sync pulses.

        It is essential that the `var_names`, `bpod_rsync_fields`, `bpod_fields`, and `sync_field`
        attributes are all correct for the bpod protocol used.

        Below are the steps involved:
        0. Load sync and bpod trials, if required.
        1. Determine protocol period and discard sync events outside the task.
        2. Classify multiplexed TTL events based on length (see :meth:`FpgaTrials.build_trials`).
        3. Sync the Bpod clock to the DAQ clock using one of the assigned trial events.
        4. Assign classified TTL events to trial events based on order within the trial.
        5. Convert Bpod software event times to DAQ clock.
        6. Extract the wheel from the DAQ rotary encoder signal, if required.

        Parameters
        ----------
        sync : dict
            A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses
            and the corresponding channel numbers. If None, the sync is loaded using the
            `load_sync` method.
        chmap : dict
            A map of channel names and their corresponding indices. If None, the channel map is
            loaded using the :meth:`FpgaTrials.load_sync` method.
        sync_collection : str
            The session subdirectory where the sync data are located. This is only used if the
            sync or channel maps are not provided.
        task_collection : str
            The session subdirectory where the raw Bpod data are located. This is used for loading
            the task settings and extracting the bpod trials, if not already done.
        protocol_number : int
            The protocol number if multiple protocols were run during the session. If provided, a
            spacer signal must be present in order to determine the correct period.
        kwargs
            Optional arguments for subclass methods to use.

        Returns
        -------
        dict
            A dictionary of numpy arrays with `FpgaTrials.var_names` as keys.
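
        Examples
        --------
        A hedged sketch for a session with chained protocols; `protocol_number` selects which
        task period to extract (this method is typically called via `extract`):

        >>> trials, _ = FpgaTrials(session_path).extract(save=False, protocol_number=1)  # doctest: +SKIP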
767 """
768 if sync is None or chmap is None: 1fiaecgbdh
769 _sync, _chmap = self.load_sync(sync_collection) 1i
770 sync = sync or _sync 1i
771 chmap = chmap or _chmap 1i
773 if not self.bpod_trials: # extract the behaviour data from bpod 1fiaecgbdh
774 self.extractor = get_bpod_extractor(self.session_path, task_collection=task_collection)
775 _logger.info('Bpod trials extractor: %s.%s', self.extractor.__module__, self.extractor.__class__.__name__)
776 self.bpod_trials, *_ = self.extractor.extract(task_collection=task_collection, save=False, **kwargs)
778 # Explode trials table df
779 if 'table' in self.var_names: 1fiaecgbdh
780 trials_table = alfio.AlfBunch.from_df(self.bpod_trials.pop('table')) 1fiaecgbdh
781 table_columns = trials_table.keys() 1fiaecgbdh
782 self.bpod_trials.update(trials_table) 1fiaecgbdh
783 else:
784 if 'table' in self.bpod_trials: 1a
785 _logger.error(
786 '"table" found in Bpod trials but missing from `var_names` attribute and will'
787 'therefore not be extracted. This is likely in error.')
788 table_columns = None 1a
790 bpod = get_sync_fronts(sync, chmap['bpod']) 1fiaecgbdh
791 # Get the spacer times for this protocol
792 if any(arg in kwargs for arg in ('tmin', 'tmax')): 1fiaecgbdh
793 tmin, tmax = kwargs.get('tmin'), kwargs.get('tmax') 1cdh
794 elif (protocol_number := kwargs.get('protocol_number')) is not None: # look for spacer 1fiaegb
795 # The spacers are TTLs generated by Bpod at the start of each protocol
796 tmin, tmax = get_protocol_period(self.session_path, protocol_number, bpod)
797 else:
798 # Older sessions don't have protocol spacers so we sync the Bpod intervals here to
799 # find the approximate end time of the protocol (this will exclude the passive signals
800 # in ephysChoiceWorld that tend to ruin the final trial extraction).
801 _, trial_ints = self.get_bpod_event_times(sync, chmap, **kwargs) 1fiaegb
802 t_trial_start = trial_ints.get('trial_start', np.array([[np.nan, np.nan]]))[:, 0] 1fiaegb
803 bpod_start = self.bpod_trials['intervals'][:, 0] 1fiaegb
804 if len(t_trial_start) > len(bpod_start) / 2: # if least half the trial start TTLs detected 1fiaegb
805 _logger.warning('Attempting to get protocol period from aligning trial start TTLs') 1faegb
806 fcn, *_ = ibldsp.utils.sync_timestamps(bpod_start, t_trial_start) 1faegb
807 buffer = 2.5 # the number of seconds to include before/after task 1faegb
808 start, end = fcn(self.bpod_trials['intervals'].flat[[0, -1]]) 1faegb
809 # NB: The following was added by k1o0 in commit b31d14e5113180b50621c985b2f230ba84da1dd3
810 # however it is not clear why this was necessary and it appears to defeat the purpose of
811 # removing the passive protocol part from the final trial extraction in ephysChoiceWorld.
812 # tmin = min(sync['times'][0], start - buffer)
813 # tmax = max(sync['times'][-1], end + buffer)
814 tmin = start - buffer 1faegb
815 tmax = end + buffer 1faegb
816 else: # This type of alignment fails for some sessions, e.g. mesoscope
817 tmin = tmax = None 1ia
819 # Remove unnecessary data from sync
820 selection = np.logical_and( 1fiaecgbdh
821 sync['times'] <= (tmax if tmax is not None else sync['times'][-1]),
822 sync['times'] >= (tmin if tmin is not None else sync['times'][0]),
823 )
824 sync = alfio.AlfBunch({k: v[selection] for k, v in sync.items()}) 1fiaecgbdh
825 _logger.debug('Protocol period from %.2fs to %.2fs (~%.0f min duration)', 1fiaecgbdh
826 *sync['times'][[0, -1]], np.diff(sync['times'][[0, -1]]) / 60)
828 # Get the trial events from the DAQ sync TTLs, sync clocks and build final trials datasets
829 out = self.build_trials(sync=sync, chmap=chmap, **kwargs) 1fiaecgbdh
831 # extract the wheel data
832 if any(x.startswith('wheel') for x in self.var_names): 1fiaecgbdh
833 wheel, moves = self.get_wheel_positions(sync=sync, chmap=chmap, tmin=tmin, tmax=tmax) 1fiaecgbdh
834 from ibllib.io.extractors.training_wheel import extract_first_movement_times 1fiaecgbdh
835 if not self.settings: 1fiaecgbdh
836 self.settings = raw.load_settings(session_path=self.session_path, task_collection=task_collection)
837 min_qt = self.settings.get('QUIESCENT_PERIOD', None) 1fiaecgbdh
838 first_move_onsets, *_ = extract_first_movement_times(moves, out, min_qt=min_qt) 1fiaecgbdh
839 out.update({'firstMovement_times': first_move_onsets}) 1fiaecgbdh
840 out.update({f'wheel_{k}': v for k, v in wheel.items()}) 1fiaecgbdh
841 out.update({f'wheelMoves_{k}': v for k, v in moves.items()}) 1fiaecgbdh
843 # Re-create trials table
844 if table_columns: 1fiaecgbdh
845 trials_table = alfio.AlfBunch({x: out.pop(x) for x in table_columns}) 1fiaecgbdh
846 out['table'] = trials_table.to_df() 1fiaecgbdh
848 out = alfio.AlfBunch({k: out[k] for k in self.var_names if k in out}) # Reorder output 1fiaecgbdh
849 assert self.var_names == tuple(out.keys()) 1fiaecgbdh
850 return out 1fiaecgbdh

    def _is_trials_object_attribute(self, var_name, variable_length_vars=None):
        """
        Check if variable name is expected to have the same length as trials.intervals.

        Parameters
        ----------
        var_name : str
            The variable name to check.
        variable_length_vars : list
            Set of variable names that are not expected to have the same length as trials.intervals.
            This list may be passed by superclasses.

        Returns
        -------
        bool
            True if variable is a trials dataset.

        Examples
        --------
        >>> assert self._is_trials_object_attribute('stimOnTrigger_times') is True
        >>> assert self._is_trials_object_attribute('wheel_position') is False
        """
        save_name = self.save_names[self.var_names.index(var_name)] if var_name in self.var_names else None
        if save_name:
            return filename_parts(save_name)[1] == 'trials'
        else:
            return var_name not in (variable_length_vars or [])

    def build_trials(self, sync, chmap, display=False, **kwargs):
        """
        Extract task related event times from the sync.

        The trial start times are the shortest Bpod TTLs and occur at the start of the trial. The
        first trial start TTL of the session is longer and must be handled differently. The trial
        start TTL is used to assign the other trial events to each trial.

        The trial end is the end of the so-called 'ITI' Bpod event TTL (classified as the longest
        of the three Bpod event TTLs). Go cue audio TTLs are the shorter of the two expected audio
        tones. The first of these after each trial start is taken to be the go cue time. Error
        tones are longer audio TTLs and assigned as the last of such occurrence after each trial
        start. The valve open Bpod TTLs are medium-length, the last of which is used for each trial.
        The feedback times are times of either valve open or error tone as there should be only one
        such event per trial.

        The stimulus times are taken from the frame2ttl events (with improbably high frequency TTLs
        removed): the first TTL after each trial start is assumed to be the stim onset time; the
        second to last and last are taken as the stimulus freeze and offset times, respectively.

        Parameters
        ----------
        sync : dict
            'polarities' of fronts detected on sync trace for all 16 chans and their 'times'
        chmap : dict
            Map of channel names and their corresponding index. Default to constant.
        display : bool, matplotlib.pyplot.Axes
            Show the full session sync pulses display.

        Returns
        -------
        dict
            A map of trial event timestamps.
        """
        # Get the events from the sync.
        # Store the cleaned frame2ttl, audio, and bpod pulses as this will be used for QC
        self.frame2ttl = self.get_stimulus_update_times(sync, chmap, **kwargs)
        self.audio, audio_event_intervals = self.get_audio_event_times(sync, chmap, **kwargs)
        if not set(audio_event_intervals.keys()) >= {'ready_tone', 'error_tone'}:
            raise ValueError(
                'Expected at least "ready_tone" and "error_tone" audio events. '
                '`audio_event_ttls` kwarg may be incorrect.')
        self.bpod, bpod_event_intervals = self.get_bpod_event_times(sync, chmap, **kwargs)
        if not set(bpod_event_intervals.keys()) >= {'trial_start', 'valve_open', 'trial_end'}:
            raise ValueError(
                'Expected at least "trial_start", "trial_end", and "valve_open" Bpod events. '
                '`bpod_event_ttls` kwarg may be incorrect.')

        t_iti_in, t_trial_end = bpod_event_intervals['trial_end'].T
        fpga_events = alfio.AlfBunch({
            'goCue_times': audio_event_intervals['ready_tone'][:, 0],
            'errorCue_times': audio_event_intervals['error_tone'][:, 0],
            'valveOpen_times': bpod_event_intervals['valve_open'][:, 0],
            'valveClose_times': bpod_event_intervals['valve_open'][:, 1],
            'itiIn_times': t_iti_in,
            'intervals_0': bpod_event_intervals['trial_start'][:, 0],
            'intervals_1': t_trial_end
        })

        # Sync the Bpod clock to the DAQ.
        # NB: The Bpod extractor typically drops the final, incomplete, trial. Hence there is
        # usually at least one extra FPGA event. This shouldn't affect the sync. The final trial is
        # dropped after assigning the FPGA events, using the `ibpod` index. Doing this after
        # assigning the FPGA trial events ensures the last trial has the correct timestamps.
        self.bpod2fpga, drift_ppm, ibpod, ifpga = self.sync_bpod_clock(self.bpod_trials, fpga_events, self.sync_field)

        bpod_start = self.bpod2fpga(self.bpod_trials['intervals'][:, 0])
        missing_bpod_idx = np.setxor1d(ibpod, np.arange(len(bpod_start)))
        if missing_bpod_idx.size > 0 and self.sync_field == 'intervals_0':
            # One issue is that sometimes pulses may not have been detected, in this case
            # add the events that have not been detected and re-extract the behaviour sync.
            # This is only really relevant for the Bpod interval events as the other TTLs are
            # from devices where a missing TTL likely means the Bpod event was truly absent.
            _logger.warning('Missing Bpod TTLs; reassigning events using aligned Bpod start times')
            missing_bpod = bpod_start[missing_bpod_idx]
            # Another complication: if the first trial start is missing on the FPGA, the second
            # trial start is assumed to be the first and is mis-assigned to another trial event
            # (i.e. valve open). This is done because the first Bpod pulse is irregularly long.
            # See `FpgaTrials.get_bpod_event_times` for details.

            # If the first trial start is missing and the first detected FPGA event doesn't match
            # any Bpod starts, then it's probably a mis-assigned valve or trial end event.
            i1 = np.any(missing_bpod_idx == 0) and not np.any(np.isclose(fpga_events['intervals_0'][0], bpod_start))
            # skip mis-assigned first FPGA trial start
            t_trial_start = np.sort(np.r_[fpga_events['intervals_0'][int(i1):], missing_bpod])
            ibpod = np.sort(np.r_[ibpod, missing_bpod_idx])
            if i1:
                # The first trial start is actually the first valve open here
                first_on, first_off = bpod_event_intervals['trial_start'][0, :]
                bpod_valve_open = self.bpod2fpga(self.bpod_trials['feedback_times'][self.bpod_trials['feedbackType'] == 1])
                if np.any(np.isclose(first_on, bpod_valve_open)):
                    # Probably assigned to the valve open
                    _logger.debug('Re-reassigning first valve open event. TTL length = %.3g ms', first_off - first_on)
                    fpga_events['valveOpen_times'] = np.sort(np.r_[first_on, fpga_events['valveOpen_times']])
                    fpga_events['valveClose_times'] = np.sort(np.r_[first_off, fpga_events['valveClose_times']])
                elif np.any(np.isclose(first_on, self.bpod2fpga(self.bpod_trials['itiIn_times']))):
                    # Probably assigned to the trial end
                    _logger.debug('Re-reassigning first trial end event. TTL length = %.3g ms', first_off - first_on)
                    fpga_events['itiIn_times'] = np.sort(np.r_[first_on, fpga_events['itiIn_times']])
                    fpga_events['intervals_1'] = np.sort(np.r_[first_off, fpga_events['intervals_1']])
                else:
                    _logger.warning('Unable to reassign first trial start event. TTL length = %.3g ms', first_off - first_on)
                # Bpod trial_start event intervals are not used but for consistency we'll update them here anyway
                bpod_event_intervals['trial_start'] = bpod_event_intervals['trial_start'][1:, :]
        else:
            t_trial_start = fpga_events['intervals_0']

        out = alfio.AlfBunch()
        # Add the Bpod trial events, converting the timestamp fields to FPGA time.
        # NB: The trial intervals are by default a Bpod rsync field.
        out.update({k: self.bpod_trials[k][ibpod] for k in self.bpod_fields})
        for k in self.bpod_rsync_fields:
            # Some personal projects may extract non-trials object datasets that may not have 1 event per trial
            idx = ibpod if self._is_trials_object_attribute(k) else np.arange(len(self.bpod_trials[k]), dtype=int)
            out[k] = self.bpod2fpga(self.bpod_trials[k][idx])

        f2ttl_t = self.frame2ttl['times']
        # Assign the FPGA events to individual trials
        fpga_trials = {
            'goCue_times': _assign_events_to_trial(t_trial_start, fpga_events['goCue_times'], take='first'),
            'errorCue_times': _assign_events_to_trial(t_trial_start, fpga_events['errorCue_times']),
            'valveOpen_times': _assign_events_to_trial(t_trial_start, fpga_events['valveOpen_times']),
            'itiIn_times': _assign_events_to_trial(t_trial_start, fpga_events['itiIn_times']),
            'stimOn_times': np.full_like(t_trial_start, np.nan),
            'stimOff_times': np.full_like(t_trial_start, np.nan),
            'stimFreeze_times': np.full_like(t_trial_start, np.nan)
        }

        # f2ttl times are unreliable owing to calibration and Bonsai sync square update issues.
        # Take the first event after the FPGA aligned stimulus trigger time.
        fpga_trials['stimOn_times'] = _assign_events_to_trial(
            out['stimOnTrigger_times'], f2ttl_t, take='first', t_trial_end=out['stimOffTrigger_times'])
        fpga_trials['stimOff_times'] = _assign_events_to_trial(
            out['stimOffTrigger_times'], f2ttl_t, take='first', t_trial_end=out['intervals'][:, 1])
        # For stim freeze we take the last event before the stim off trigger time.
        # To avoid assigning early events (e.g. for sessions where there are few flips due to
        # mis-calibration), we discount events before stim freeze trigger times (or stim on trigger
        # times for versions below 6.2.5). We take the last event rather than the first after stim
        # freeze trigger because often there are multiple flips after the trigger, presumably
        # before the stim actually stops.
        stim_freeze = np.copy(out['stimFreezeTrigger_times'])
        go_trials = np.where(out['choice'] != 0)[0]
        # NB: versions below 6.2.5 have no trigger times so use stim on trigger times
        lims = np.copy(out['stimOnTrigger_times'])
        if not np.isnan(stim_freeze).all():
            # Stim freeze times are NaN for nogo trials, but for all others use stim freeze trigger
            # times. _assign_events_to_trial requires ascending timestamps so no NaNs allowed.
            lims[go_trials] = stim_freeze[go_trials]
        # take last event after freeze/stim on trigger, before stim off trigger
        stim_freeze = _assign_events_to_trial(lims, f2ttl_t, take='last', t_trial_end=out['stimOffTrigger_times'])
        fpga_trials['stimFreeze_times'][go_trials] = stim_freeze[go_trials]
        # Feedback times are valve open on correct trials and error tone in on incorrect trials
        fpga_trials['feedback_times'] = np.copy(fpga_trials['valveOpen_times'])
        ind_err = np.isnan(fpga_trials['valveOpen_times'])
        fpga_trials['feedback_times'][ind_err] = fpga_trials['errorCue_times'][ind_err]

        # Use ibpod to discard the final trial if it is incomplete
        # ibpod should be indices of all Bpod trials, even those that were not detected on the FPGA
        out.update({k: fpga_trials[k][ibpod] for k in fpga_trials.keys()})

        if display:  # pragma: no cover
            width = 2
            ymax = 5
            if isinstance(display, bool):
                plt.figure('Bpod FPGA Sync')
                ax = plt.gca()
            else:
                ax = display
            plots.squares(self.bpod['times'], self.bpod['polarities'] * 0.4 + 1, ax=ax, color='k')
            plots.squares(self.frame2ttl['times'], self.frame2ttl['polarities'] * 0.4 + 2, ax=ax, color='k')
            plots.squares(self.audio['times'], self.audio['polarities'] * 0.4 + 3, ax=ax, color='k')
            color_map = TABLEAU_COLORS.keys()
            for (event_name, event_times), c in zip(fpga_events.items(), cycle(color_map)):
                plots.vertical_lines(event_times, ymin=0, ymax=ymax, ax=ax, color=c, label=event_name, linewidth=width)
            # Plot the stimulus events along with the trigger times
            stim_events = filter(lambda t: 'stim' in t[0], fpga_trials.items())
            for (event_name, event_times), c in zip(stim_events, cycle(color_map)):
                plots.vertical_lines(
                    event_times, ymin=0, ymax=ymax, ax=ax, color=c, label=event_name, linewidth=width, linestyle='--')
                nm = event_name.replace('_times', 'Trigger_times')
                plots.vertical_lines(
                    out[nm], ymin=0, ymax=ymax, ax=ax, color=c, label=nm, linewidth=width, linestyle=':')
            ax.legend()
            ax.set_yticks([0, 1, 2, 3])
            ax.set_yticklabels(['', 'bpod', 'f2ttl', 'audio'])
            ax.set_ylim([0, 5])
        return out

    def get_wheel_positions(self, *args, **kwargs):
        """Extract wheel and wheelMoves objects.

        This method is called by the main extract method and may be overloaded by subclasses.
        """
        return get_wheel_positions(*args, **kwargs)

    def get_stimulus_update_times(self, sync, chmap, display=False, **_):
        """
        Extract stimulus update times from sync.

        Gets the stimulus times from the frame2ttl channel and cleans the signal.

        Parameters
        ----------
        sync : dict
            A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses
            and the corresponding channel numbers.
        chmap : dict
            A map of channel names and their corresponding indices. Must contain a 'frame2ttl' key.
        display : bool
            If true, plots the input TTLs and the cleaned output.

        Returns
        -------
        dict
            A dictionary with keys {'times', 'polarities'} containing stimulus TTL fronts.
        """
        frame2ttl = get_sync_fronts(sync, chmap['frame2ttl'])
        frame2ttl = _clean_frame2ttl(frame2ttl, display=display)
        return frame2ttl

    def get_audio_event_times(self, sync, chmap, audio_event_ttls=None, display=False, **_):
        """
        Extract audio times from sync.

        Gets the TTL times from the 'audio' channel, cleans the signal, and classifies each TTL
        event by length.

        Parameters
        ----------
        sync : dict
            A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses
            and the corresponding channel numbers.
        chmap : dict
            A map of channel names and their corresponding indices. Must contain an 'audio' key.
        audio_event_ttls : dict
            A map of event names to (min, max) TTL length.
        display : bool
            If true, plots the input TTLs and the cleaned output.

        Returns
        -------
        dict
            A dictionary with keys {'times', 'polarities'} containing audio TTL fronts.
        dict
            A dictionary of events (from `audio_event_ttls`) and their intervals as an Nx2 array.
        """
        audio = get_sync_fronts(sync, chmap['audio'])
        audio = _clean_audio(audio)

        if audio['times'].size == 0:
            _logger.error('No audio sync fronts found.')

        if audio_event_ttls is None:
            # For training/biased/ephys protocols, the ready tone should be below 110 ms. The error
            # tone should be between 400ms and 1200ms
            audio_event_ttls = {'ready_tone': (0, 0.1101), 'error_tone': (0.4, 1.2)}
        audio_event_intervals = self._assign_events(audio['times'], audio['polarities'], audio_event_ttls, display=display)

        return audio, audio_event_intervals

    def get_bpod_event_times(self, sync, chmap, bpod_event_ttls=None, display=False, **kwargs):
        """
        Extract Bpod times from sync.

        Gets the Bpod TTL times from the sync 'bpod' channel and classifies each TTL event by
        length. NB: The first trial has an abnormal trial_start TTL that is usually mis-assigned.
        This method accounts for this.

        Parameters
        ----------
        sync : dict
            A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses
            and the corresponding channel numbers. Must contain a 'bpod' key.
        chmap : dict
            A map of channel names and their corresponding indices.
        bpod_event_ttls : dict of tuple
            A map of event names to (min, max) TTL length.

        Returns
        -------
        dict
            A dictionary with keys {'times', 'polarities'} containing Bpod TTL fronts.
        dict
            A dictionary of events (from `bpod_event_ttls`) and their intervals as an Nx2 array.
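
        Examples
        --------
        A hedged sketch showing how custom TTL length bounds could be passed for a protocol with
        non-default state machine pulse lengths (the bounds shown are this method's defaults):

        >>> ttls = {'trial_start': (0, 2.33e-4), 'valve_open': (2.33e-4, 0.4), 'trial_end': (0.4, np.inf)}
        >>> bpod, intervals = extractor.get_bpod_event_times(sync, chmap, bpod_event_ttls=ttls)  # doctest: +SKIP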
1164 """
1165 bpod = get_sync_fronts(sync, chmap['bpod']) 1usfiaecgblmnkjdh
1166 if bpod.times.size == 0: 1usfiaecgblmnkjdh
1167 raise err.SyncBpodFpgaException('No Bpod event found in FPGA. No behaviour extraction. '
1168 'Check channel maps.')
1169 # Assign the Bpod BNC2 events based on TTL length. The defaults are below, however these
1170 # lengths are defined by the state machine of the task protocol and therefore vary.
1171 if bpod_event_ttls is None: 1usfiaecgblmnkjdh
1172 # For training/biased/ephys protocols, the trial start TTL length is 0.1ms but this has
1173 # proven to drift on some Bpods and this is the highest possible value that
1174 # discriminates trial start from valve. Valve open events are between 50ms to 300 ms.
1175 # ITI events are above 400 ms.
1176 bpod_event_ttls = { 1usfaecgblmnkjdh
1177 'trial_start': (0, 2.33e-4), 'valve_open': (2.33e-4, 0.4), 'trial_end': (0.4, np.inf)}
1178 bpod_event_intervals = self._assign_events( 1usfiaecgblmnkjdh
1179 bpod['times'], bpod['polarities'], bpod_event_ttls, display=display)
1181 if 'trial_start' not in bpod_event_intervals or bpod_event_intervals['trial_start'].size == 0: 1usfiaecgblmnkjdh
1182 return bpod, bpod_event_intervals 1i
1184 # The first trial pulse is longer and often assigned to another event.
1185 # Here we move the earliest non-trial_start event to the trial_start array.
1186 t0 = bpod_event_intervals['trial_start'][0, 0] # expect 1st event to be trial_start 1usfaecgblmnkjdh
1187 pretrial = [(k, v[0, 0]) for k, v in bpod_event_intervals.items() if v.size and v[0, 0] < t0] 1usfaecgblmnkjdh
1188 if pretrial: 1usfaecgblmnkjdh
1189 (pretrial, _) = sorted(pretrial, key=lambda x: x[1])[0] # take the earliest event 1usfaecgblmnkjdh
1190 dt = np.diff(bpod_event_intervals[pretrial][0, :]) * 1e3 # record TTL length to log 1usfaecgblmnkjdh
1191 _logger.debug('Reassigning first %s to trial_start. TTL length = %.3g ms', pretrial, dt) 1usfaecgblmnkjdh
1192 bpod_event_intervals['trial_start'] = np.r_[ 1usfaecgblmnkjdh
1193 bpod_event_intervals[pretrial][0:1, :], bpod_event_intervals['trial_start']
1194 ]
1195 bpod_event_intervals[pretrial] = bpod_event_intervals[pretrial][1:, :] 1usfaecgblmnkjdh
1197 return bpod, bpod_event_intervals 1usfaecgblmnkjdh
1199 @staticmethod
1200 def _assign_events(ts, polarities, event_lengths, precedence='shortest', display=False):
1201 """
1202 Classify TTL events by length.
1204 Outputs the synchronisation events such as trial intervals, valve opening, and audio.
1206 Parameters
1207 ----------
1208 ts : numpy.array
1209 Numpy vector containing times of TTL fronts.
1210 polarities : numpy.array
1211 Numpy vector containing polarity of TTL fronts (1 rise, -1 fall).
1212 event_lengths : dict of tuple
1213 A map of TTL events and the range of permissible lengths, where l0 < ttl <= l1.
1214 precedence : str {'shortest', 'longest', 'dict order'}
1215 In the case of overlapping event TTL lengths, assign shortest/longest first or go by
1216 the `event_lengths` dict order.
1217 display : bool
1218 If true, plots the TTLs with coloured lines delineating the assigned events.
1220 Returns
1221 -------
1222 Dict[str, numpy.array]
1223 A dictionary of events and their intervals as an Nx2 array.
1225 See Also
1226 --------
1227 _assign_events_to_trial - classify TTLs by event order within a given trial period.
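Examples
--------
A minimal sketch with synthetic fronts (one 100 ms, one 0.1 ms and one 500 ms pulse), using the
default training protocol TTL length ranges:
>>> ts = np.array([0., 0.1, 1., 1.0001, 2., 2.5])
>>> polarities = np.array([1, -1, 1, -1, 1, -1])
>>> ttls = {'trial_start': (0, 2.33e-4), 'valve_open': (2.33e-4, 0.4), 'trial_end': (0.4, np.inf)}
>>> intervals = FpgaTrials._assign_events(ts, polarities, ttls)
>>> valve_intervals = intervals['valve_open']  # -> array([[0., 0.1]]); 'unassigned' key also returned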
1228 """
1229 event_intervals = dict.fromkeys(event_lengths) 1vusfiaecgblmnkjdh
1230 assert 'unassigned' not in event_lengths.keys() 1vusfiaecgblmnkjdh
1232 if len(ts) == 0: 1vusfiaecgblmnkjdh
1233 return {k: np.array([[], []]).T for k in (*event_lengths.keys(), 'unassigned')}
1235 # make sure that there are no two consecutive falling or two consecutive rising fronts
1236 assert np.all(np.abs(np.diff(polarities)) == 2) 1vusfiaecgblmnkjdh
1237 if polarities[0] == -1: 1vusfiaecgblmnkjdh
1238 ts = np.delete(ts, 0) 1vs
1239 if polarities[-1] == 1: # if the final TTL is left HIGH, insert a NaN 1vusfiaecgblmnkjdh
1240 ts = np.r_[ts, np.nan] 1s
1241 # take only even time differences: i.e. from rising to falling fronts
1242 dt = np.diff(ts)[::2] 1vusfiaecgblmnkjdh
1244 # Assign events from shortest TTL to largest
1245 assigned = np.zeros(ts.shape, dtype=bool) 1vusfiaecgblmnkjdh
1246 if precedence.lower() == 'shortest': 1vusfiaecgblmnkjdh
1247 event_items = sorted(event_lengths.items(), key=lambda x: np.diff(x[1])) 1vusfiaecgblmnkjdh
1248 elif precedence.lower() == 'longest':
1249 event_items = sorted(event_lengths.items(), key=lambda x: np.diff(x[1]), reverse=True)
1250 elif precedence.lower() == 'dict order':
1251 event_items = event_lengths.items()
1252 else:
1253 raise ValueError(f'Precedence must be one of "shortest", "longest", "dict order", got "{precedence}".')
1254 for event, (min_len, max_len) in event_items: 1vusfiaecgblmnkjdh
1255 _logger.debug('%s: %.4G < ttl <= %.4G', event, min_len, max_len) 1vusfiaecgblmnkjdh
1256 i_event = np.where(np.logical_and(dt > min_len, dt <= max_len))[0] * 2 1vusfiaecgblmnkjdh
1257 i_event = i_event[np.where(~assigned[i_event])[0]] # remove those already assigned 1vusfiaecgblmnkjdh
1258 event_intervals[event] = np.c_[ts[i_event], ts[i_event + 1]] 1vusfiaecgblmnkjdh
1259 assigned[np.r_[i_event, i_event + 1]] = True 1vusfiaecgblmnkjdh
1261 # Include the unassigned events for convenience and debugging
1262 event_intervals['unassigned'] = ts[~assigned].reshape(-1, 2) 1vusfiaecgblmnkjdh
1264 # Assert that event TTLs are mutually exclusive
1265 all_assigned = np.concatenate(list(event_intervals.values())).flatten() 1vusfiaecgblmnkjdh
1266 assert all_assigned.size == np.unique(all_assigned).size, 'TTLs assigned to multiple events' 1vusfiaecgblmnkjdh
1268 # some debug plots when needed
1269 if display: # pragma: no cover 1vusfiaecgblmnkjdh
1270 plt.figure()
1271 plots.squares(ts, polarities, label='raw fronts')
1272 for event, intervals in event_intervals.items():
1273 plots.vertical_lines(intervals[:, 0], ymin=-0.2, ymax=1.1, linewidth=0.5, label=event)
1274 plt.legend()
1276 # Return map of event intervals in the same order as `event_lengths` dict
1277 return {k: event_intervals[k] for k in (*event_lengths, 'unassigned')} 1vusfiaecgblmnkjdh
1279 @staticmethod
1280 def sync_bpod_clock(bpod_trials, fpga_trials, sync_field):
1281 """
1282 Sync the Bpod clock to the FPGA clock using the provided trial event.
1284 It assumes that `sync_field` is present in both `fpga_trials` and `bpod_trials`. Syncing on
1285 the full Nx2 intervals array is not supported, so to sync on trial start times `sync_field`
1286 should be 'intervals_0'.
1288 Parameters
1289 ----------
1290 bpod_trials : dict
1291 A dictionary of extracted Bpod trial events.
1292 fpga_trials : dict
1293 A dictionary of TTL events extracted from FPGA sync (see `extract_behaviour_sync`
1294 method).
1295 sync_field : str
1296 The trials key to use for syncing clocks. For intervals (i.e. Nx2 arrays) append the
1297 column index, e.g. 'intervals_0'.
1299 Returns
1300 -------
1301 function
1302 Interpolation function such that f(timestamps_bpod) = timestamps_fpga.
1303 float
1304 The clock drift in parts per million.
1305 numpy.array of int
1306 The indices of the Bpod trial events in the FPGA trial events array.
1307 numpy.array of int
1308 The indices of the FPGA trial events in the Bpod trial events array.
1310 Raises
1311 ------
1312 ValueError
1313 The key `sync_field` was not found in either the `bpod_trials` or `fpga_trials` dicts.
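Examples
--------
A minimal sketch, assuming `bpod_trials` contains an Nx2 'intervals' array and `fpga_trials`
contains the matching 'intervals_0' trial start times, syncing on trial start:
>>> bpod2fpga, drift_ppm, ibpod, ifpga = FpgaTrials.sync_bpod_clock(bpod_trials, fpga_trials, 'intervals_0')
>>> stim_on_fpga = bpod2fpga(bpod_trials['stimOn_times'][ibpod])  # convert Bpod times to FPGA time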
1314 """
1315 _logger.info(f'Attempting to align Bpod clock to DAQ using trial event "{sync_field}"') 1fiaecgbdh
1316 bpod_fpga_timestamps = [None, None] 1fiaecgbdh
1317 for i, trials in enumerate((bpod_trials, fpga_trials)): 1fiaecgbdh
1318 if sync_field not in trials: 1fiaecgbdh
1319 # handle syncing on intervals
1320 if not (m := re.match(r'(.*)_(\d)', sync_field)): 1faecgbdh
1321 # If missing from bpod trials, either the sync field is incorrect,
1322 # or the Bpod extractor is incorrect. If missing from the fpga events, check
1323 # the sync field and the `extract_behaviour_sync` method.
1324 raise ValueError(
1325 f'Sync field "{sync_field}" not in extracted {"fpga" if i else "bpod"} events')
1326 _sync_field, n = m.groups() 1faecgbdh
1327 bpod_fpga_timestamps[i] = trials[_sync_field][:, int(n)] 1faecgbdh
1328 else:
1329 bpod_fpga_timestamps[i] = trials[sync_field] 1fiaecgbdh
1331 # Sync the two timestamps
1332 fcn, drift, ibpod, ifpga = ibldsp.utils.sync_timestamps(*bpod_fpga_timestamps, return_indices=True) 1fiaecgbdh
1334 # If the clocks drift too much, log a warning or raise an error
1335 _logger.info('N trials: %i bpod, %i FPGA, %i merged, sync %.5f ppm', 1fiaecgbdh
1336 *map(len, bpod_fpga_timestamps), len(ibpod), drift)
1337 if drift > 200 and bpod_fpga_timestamps[0].size != bpod_fpga_timestamps[1].size: 1fiaecgbdh
1338 raise err.SyncBpodFpgaException('Bpod/FPGA sync failed: drift above 200 ppm with mismatched trial counts')
1339 elif drift > BPOD_FPGA_DRIFT_THRESHOLD_PPM: 1fiaecgbdh
1340 _logger.warning('Bpod/FPGA clock drift is greater than %.2f ppm',
1341 BPOD_FPGA_DRIFT_THRESHOLD_PPM)
1343 return fcn, drift, ibpod, ifpga 1fiaecgbdh
1346class FpgaTrialsHabituation(FpgaTrials):
1347 """Extract habituationChoiceWorld trial events from an NI DAQ."""
1349 save_names = ('_ibl_trials.stimCenter_times.npy', '_ibl_trials.feedbackType.npy', '_ibl_trials.rewardVolume.npy',
1350 '_ibl_trials.stimOff_times.npy', '_ibl_trials.contrastLeft.npy', '_ibl_trials.contrastRight.npy',
1351 '_ibl_trials.feedback_times.npy', '_ibl_trials.stimOn_times.npy', '_ibl_trials.stimOnTrigger_times.npy',
1352 '_ibl_trials.intervals.npy', '_ibl_trials.goCue_times.npy', '_ibl_trials.goCueTrigger_times.npy',
1353 None, None, None, None, None)
1354 """tuple of str: The filenames of each extracted dataset, or None if array should not be saved."""
1356 var_names = ('stimCenter_times', 'feedbackType', 'rewardVolume', 'stimOff_times', 'contrastLeft',
1357 'contrastRight', 'feedback_times', 'stimOn_times', 'stimOnTrigger_times', 'intervals',
1358 'goCue_times', 'goCueTrigger_times', 'itiIn_times', 'stimOffTrigger_times',
1359 'stimCenterTrigger_times', 'position', 'phase')
1360 """tuple of str: A list of names for the extracted variables. These become the returned output keys."""
1362 bpod_rsync_fields = ('intervals', 'stimOn_times', 'feedback_times', 'stimCenterTrigger_times',
1363 'goCue_times', 'itiIn_times', 'stimOffTrigger_times', 'stimOff_times',
1364 'stimCenter_times', 'stimOnTrigger_times', 'goCueTrigger_times')
1365 """tuple of str: Fields from Bpod extractor that we want to re-sync to FPGA."""
1367 bpod_fields = ('feedbackType', 'rewardVolume', 'contrastLeft', 'contrastRight', 'position', 'phase')
1368 """tuple of str: Fields from Bpod extractor that we want to save."""
1370 sync_field = 'feedback_times' # valve open events
1371 """str: The trial event to synchronize (must be present in extracted trials)."""
1373 def _extract(self, sync=None, chmap=None, sync_collection='raw_ephys_data',
1374 task_collection='raw_behavior_data', **kwargs) -> dict:
1375 """
1376 Extract habituationChoiceWorld trial events from an NI DAQ.
1378 It is essential that the `var_names`, `bpod_rsync_fields`, `bpod_fields`, and `sync_field`
1379 attributes are all correct for the bpod protocol used.
1381 Unlike FpgaTrials, this class assumes different Bpod TTL events and syncs the Bpod clock
1382 using the valve open times, instead of the trial start times.
1384 Parameters
1385 ----------
1386 sync : dict
1387 A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses
1388 and the corresponding channel numbers. If None, the sync is loaded using the
1389 `load_sync` method.
1390 chmap : dict
1391 A map of channel names and their corresponding indices. If None, the channel map is
1392 loaded using the `load_sync` method.
1393 sync_collection : str
1394 The session subdirectory where the sync data are located. This is only used if the
1395 sync or channel maps are not provided.
1396 task_collection : str
1397 The session subdirectory where the raw Bpod data are located. This is used for loading
1398 the task settings and extracting the bpod trials, if not already done.
1399 protocol_number : int
1400 The protocol number if multiple protocols were run during the session. If provided, a
1401 spacer signal must be present in order to determine the correct period.
1402 kwargs
1403 Optional arguments for class methods, e.g. 'display', 'bpod_event_ttls'.
1405 Returns
1406 -------
1407 dict
1408 A dictionary of numpy arrays with `FpgaTrialsHabituation.var_names` as keys.
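Examples
--------
A minimal sketch, assuming an ephys habituation session; the sync and channel map are loaded
explicitly here, but may also be left as None to load them automatically:
>>> sync, chmap = get_sync_and_chn_map(session_path, 'raw_ephys_data')
>>> extractor = FpgaTrialsHabituation(session_path)
>>> trials, _ = extractor.extract(sync=sync, chmap=chmap, save=False)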
1409 """
1410 # Version check: the ITI in TTL was added in a later version
1411 if not self.settings: 1a
1412 self.settings = raw.load_settings(session_path=self.session_path, task_collection=task_collection)
1413 iblrig_version = version.parse(self.settings.get('IBL_VERSION', '0.0.0')) 1a
1414 if version.parse('8.9.3') <= iblrig_version < version.parse('8.12.6'): 1a
1415 """A second 1s TTL was added in this version during the 'iti' state, however this is
1416 unrelated to the trial ITI and is unfortunately the same length as the trial start TTL."""
1417 raise NotImplementedError('Ambiguous TTLs in 8.9.3 >= version < 8.12.6')
1419 trials = super()._extract(sync=sync, chmap=chmap, sync_collection=sync_collection, 1a
1420 task_collection=task_collection, **kwargs)
1422 return trials 1a
1424 def get_bpod_event_times(self, sync, chmap, bpod_event_ttls=None, display=False, **kwargs):
1425 """
1426 Extract Bpod times from sync.
1428 Currently (at least v8.12 and below) there is no trial start or end TTL, only an ITI pulse.
1429 Also the first trial pulse is incorrectly assigned due to its abnormal length.
1431 Parameters
1432 ----------
1433 sync : dict
1434 A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses
1435 and the corresponding channel numbers. Must contain a 'bpod' key.
1436 chmap : dict
1437 A map of channel names and their corresponding indices.
1438 bpod_event_ttls : dict of tuple
1439 A map of event names to (min, max) TTL length.
1441 Returns
1442 -------
1443 dict
1444 A dictionary with keys {'times', 'polarities'} containing Bpod TTL fronts.
1445 dict
1446 A dictionary of events (from `bpod_event_ttls`) and their intervals as an Nx2 array.
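Examples
--------
A sketch of overriding the default TTL length ranges via `bpod_event_ttls` (the ranges below are
purely illustrative and must match the task's state machine); `extractor`, `sync` and `chmap` are
assumed to exist as in the FpgaTrials examples:
>>> ttls = {'trial_iti': (0.9, 1.2), 'valve_open': (0, 0.4)}
>>> bpod, bpod_event_intervals = extractor.get_bpod_event_times(sync, chmap, bpod_event_ttls=ttls)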
1447 """
1448 bpod = get_sync_fronts(sync, chmap['bpod']) 1a
1449 if bpod.times.size == 0: 1a
1450 raise err.SyncBpodFpgaException('No Bpod event found in FPGA. No behaviour extraction. '
1451 'Check channel maps.')
1452 # Assign the Bpod BNC2 events based on TTL length. The defaults are below, however these
1453 # lengths are defined by the state machine of the task protocol and therefore vary.
1454 if bpod_event_ttls is None: 1a
1455 # Currently (at least v8.12 and below) there is no trial start or end TTL, only an ITI pulse
1456 bpod_event_ttls = {'trial_iti': (1, 1.1), 'valve_open': (0, 0.4)} 1a
1457 bpod_event_intervals = self._assign_events( 1a
1458 bpod['times'], bpod['polarities'], bpod_event_ttls, display=display)
1460 # The first trial pulse is shorter and gets mis-assigned to valve_open. Here we move that
1461 # first event from the valve_open array to the front of the trial_iti array so that it is
1462 # treated as the first trial's pulse.
1463 bpod_event_intervals['trial_iti'] = np.r_[bpod_event_intervals['valve_open'][0:1, :], 1a
1464 bpod_event_intervals['trial_iti']]
1465 bpod_event_intervals['valve_open'] = bpod_event_intervals['valve_open'][1:, :] 1a
1467 return bpod, bpod_event_intervals 1a
1469 def build_trials(self, sync, chmap, display=False, **kwargs):
1470 """
1471 Extract task related event times from the sync.
1473 This is called by the superclass `_extract` method. The key difference here is that the
1474 `trial_iti` TTL's LOW->HIGH front marks the trial end, and its HIGH->LOW front marks the trial start.
1476 Parameters
1477 ----------
1478 sync : dict
1479 A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses and the corresponding channel numbers.
1480 chmap : dict
1481 A map of channel names and their corresponding indices.
1482 display : bool, matplotlib.pyplot.Axes
1483 If true, or if an Axes is passed, plot the full session sync pulses.
1485 Returns
1486 -------
1487 dict
1488 A map of trial event timestamps.
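Examples
--------
A minimal sketch, assuming `extractor.bpod_trials` has already been populated (normally done by
the `_extract` method before this is called) and `sync`, `chmap` are loaded:
>>> trials = extractor.build_trials(sync, chmap)
>>> feedback_times = trials['feedback_times']  # valve open times in FPGA time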
1489 """
1490 # Get the events from the sync.
1491 # Store the cleaned frame2ttl, audio, and bpod pulses as this will be used for QC
1492 self.frame2ttl = self.get_stimulus_update_times(sync, chmap, **kwargs) 1a
1493 self.audio, audio_event_intervals = self.get_audio_event_times(sync, chmap, **kwargs) 1a
1494 self.bpod, bpod_event_intervals = self.get_bpod_event_times(sync, chmap, **kwargs) 1a
1495 if not set(bpod_event_intervals.keys()) >= {'valve_open', 'trial_iti'}: 1a
1496 raise ValueError(
1497 'Expected at least "trial_iti" and "valve_open" Bpod events. `bpod_event_ttls` kwarg may be incorrect.')
1499 fpga_events = alfio.AlfBunch({ 1a
1500 'feedback_times': bpod_event_intervals['valve_open'][:, 0],
1501 'valveClose_times': bpod_event_intervals['valve_open'][:, 1],
1502 'intervals_0': bpod_event_intervals['trial_iti'][:, 1],
1503 'intervals_1': bpod_event_intervals['trial_iti'][:, 0],
1504 'goCue_times': audio_event_intervals['ready_tone'][:, 0]
1505 })
1507 # Sync the Bpod clock to the DAQ.
1508 self.bpod2fpga, drift_ppm, ibpod, ifpga = self.sync_bpod_clock(self.bpod_trials, fpga_events, self.sync_field) 1a
1510 out = alfio.AlfBunch() 1a
1511 # Add the Bpod trial events, converting the timestamp fields to FPGA time.
1512 # NB: The trial intervals are by default a Bpod rsync field.
1513 out.update({k: self.bpod_trials[k][ibpod] for k in self.bpod_fields}) 1a
1514 out.update({k: self.bpod2fpga(self.bpod_trials[k][ibpod]) for k in self.bpod_rsync_fields}) 1a
1516 # Assigning each event to a trial ensures exactly one event per trial (missing events are NaN)
1517 assign_to_trial = partial(_assign_events_to_trial, fpga_events['intervals_0']) 1a
1518 trials = alfio.AlfBunch({ 1a
1519 'goCue_times': assign_to_trial(fpga_events['goCue_times'], take='first'),
1520 'feedback_times': assign_to_trial(fpga_events['feedback_times']),
1521 'stimCenter_times': assign_to_trial(self.frame2ttl['times'], take=-2),
1522 'stimOn_times': assign_to_trial(self.frame2ttl['times'], take='first'),
1523 'stimOff_times': assign_to_trial(self.frame2ttl['times']),
1524 })
1525 out.update({k: trials[k][ifpga] for k in trials.keys()}) 1a
1527 # If stim on occurs before trial start, use stim on time as trial start; likewise, if stim off occurs after trial end, use stim off time as trial end
1528 to_correct = ~np.isnan(out['stimOn_times']) & (out['stimOn_times'] < out['intervals'][:, 0]) 1a
1529 if np.any(to_correct): 1a
1530 _logger.warning('%i/%i stim on events occurring outside trial intervals', sum(to_correct), len(to_correct))
1531 out['intervals'][to_correct, 0] = out['stimOn_times'][to_correct]
1532 to_correct = ~np.isnan(out['stimOff_times']) & (out['stimOff_times'] > out['intervals'][:, 1]) 1a
1533 if np.any(to_correct): 1a
1534 _logger.debug( 1a
1535 '%i/%i stim off events occurring outside trial intervals; using stim off times as trial end',
1536 sum(to_correct), len(to_correct))
1537 out['intervals'][to_correct, 1] = out['stimOff_times'][to_correct] 1a
1539 if display: # pragma: no cover 1a
1540 width = 0.5
1541 ymax = 5
1542 if isinstance(display, bool):
1543 plt.figure('Bpod FPGA Sync')
1544 ax = plt.gca()
1545 else:
1546 ax = display
1547 plots.squares(self.bpod['times'], self.bpod['polarities'] * 0.4 + 1, ax=ax, color='k')
1548 plots.squares(self.frame2ttl['times'], self.frame2ttl['polarities'] * 0.4 + 2, ax=ax, color='k')
1549 plots.squares(self.audio['times'], self.audio['polarities'] * 0.4 + 3, ax=ax, color='k')
1550 color_map = TABLEAU_COLORS.keys()
1551 for (event_name, event_times), c in zip(trials.to_df().items(), cycle(color_map)):
1552 plots.vertical_lines(event_times, ymin=0, ymax=ymax, ax=ax, color=c, label=event_name, linewidth=width)
1553 ax.legend()
1554 ax.set_yticks([0, 1, 2, 3])
1555 ax.set_yticklabels(['', 'bpod', 'f2ttl', 'audio'])
1556 ax.set_ylim([0, 4])
1558 return out 1a
1561def get_sync_and_chn_map(session_path, sync_collection):
1562 """
1563 Return the sync and channel map for a session, based on the collection where the main sync is stored.
1565 Parameters
1566 ----------
1567 session_path : str, pathlib.Path
1568 The absolute session path, i.e. '/path/to/subject/yyyy-mm-dd/nnn'.
1569 sync_collection : str
1570 The session subdirectory where the sync data are located.
1572 Returns
1573 -------
1574 one.alf.io.AlfBunch
1575 A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses and
1576 the corresponding channel numbers.
1577 dict
1578 A map of channel names and their corresponding indices.
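Examples
--------
A minimal sketch for a session recorded with an NI DAQ (the path is an illustrative placeholder):
>>> session_path = Path('/path/to/subject/yyyy-mm-dd/001')
>>> sync, chmap = get_sync_and_chn_map(session_path, 'raw_ephys_data')
>>> bpod_fronts = get_sync_fronts(sync, chmap['bpod'])  # Bpod TTL fronts on the DAQ clock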
1579 """
1580 if sync_collection == 'raw_ephys_data': 1EFfMQSTUNOPtGHaecgbdh
1581 # Check whether nidq files are present; if so, use them, otherwise fall back to the
1582 # function that handles 3A probes
1583 nidq_meta = next(session_path.joinpath(sync_collection).glob('*nidq.meta'), None) 1EFfMNOPtGHaecgbdh
1584 if not nidq_meta: 1EFfMNOPtGHaecgbdh
1585 sync, chmap = get_main_probe_sync(session_path) 1aecd
1586 else:
1587 sync = load_sync(session_path, sync_collection) 1EFfMNOPtGHagbdh
1588 ef = Bunch() 1EFfMNOPtGHagbdh
1589 ef['path'] = session_path.joinpath(sync_collection) 1EFfMNOPtGHagbdh
1590 ef['nidq'] = nidq_meta 1EFfMNOPtGHagbdh
1591 chmap = get_ibl_sync_map(ef, '3B') 1EFfMNOPtGHagbdh
1593 else:
1594 sync = load_sync(session_path, sync_collection) 1QSTU
1595 chmap = load_channel_map(session_path, sync_collection) 1QSTU
1597 return sync, chmap 1EFfMQSTUNOPtGHaecgbdh
1600def load_channel_map(session_path, sync_collection):
1601 """
1602 Load the sync channel map for the given session path and collection.
1604 Parameters
1605 ----------
1606 session_path : str, pathlib.Path
1607 The absolute session path, i.e. '/path/to/subject/yyyy-mm-dd/nnn'.
1608 sync_collection : str
1609 The session subdirectory where the sync data are located.
1611 Returns
1612 -------
1613 dict
1614 A map of channel names and their corresponding indices.
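Examples
--------
A minimal sketch; the collection name is illustrative, and its second underscore-separated token
(here 'widefield') is assumed to be a device key in `DEFAULT_MAPS` with an 'nidq' entry:
>>> chmap = load_channel_map(session_path, 'raw_widefield_data')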
1615 """
1617 device = sync_collection.split('_')[1] 1QSTU
1618 default_chmap = DEFAULT_MAPS[device]['nidq'] 1QSTU
1620 # Try to load channel map from file
1621 chmap = spikeglx.get_sync_map(session_path.joinpath(sync_collection)) 1QSTU
1622 # If a chmap is provided but is missing keys, fill in the missing keys from the default map
1623 if not chmap: 1QSTU
1624 return default_chmap 1STU
1625 else:
1626 if data_for_keys(default_chmap.keys(), chmap): 1Q
1627 return chmap 1Q
1628 else:
1629 _logger.warning('Keys missing from provided channel map, '
1630 'setting missing keys from default channel map')
1631 return {**default_chmap, **chmap}
1634def load_sync(session_path, sync_collection):
1635 """
1636 Load sync files from session path and collection.
1638 Parameters
1639 ----------
1640 session_path : str, pathlib.Path
1641 The absolute session path, i.e. '/path/to/subject/yyyy-mm-dd/nnn'.
1642 sync_collection : str
1643 The session subdirectory where the sync data are located.
1645 Returns
1646 -------
1647 one.alf.io.AlfBunch
1648 A dictionary with keys ('times', 'polarities', 'channels'), containing the sync pulses and
1649 the corresponding channel numbers.
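Examples
--------
A minimal sketch, assuming a _spikeglx_sync ALF object has been extracted to the session's
raw_ephys_data collection:
>>> sync = load_sync(session_path, 'raw_ephys_data')
>>> frame2ttl = get_sync_fronts(sync, CHMAPS['3B']['nidq']['frame2ttl'])  # frame2ttl fronts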
1650 """
1651 sync = alfio.load_object(session_path.joinpath(sync_collection), 'sync', namespace='spikeglx', short_keys=True) 1EFfMQSTUNOPtGHagbdh
1653 return sync 1EFfMQSTUNOPtGHagbdh