Coverage for ibllib/io/extractors/camera.py: 98%

348 statements  

coverage.py v7.3.2, created at 2023-10-11 11:13 +0100

1""" Camera extractor functions. 

2This module handles extraction of camera timestamps for both Bpod and DAQ. 

3""" 

4import logging 

5from functools import partial 

6 

7import cv2 

8import numpy as np 

9import matplotlib.pyplot as plt 

10from iblutil.util import range_str 

11 

12import neurodsp.utils as dsp 

13from ibllib.plots import squares, vertical_lines 

14from ibllib.io.video import assert_valid_label, VideoStreamer 

15from iblutil.numerical import within_ranges 

16from ibllib.io.extractors.base import get_session_extractor_type 

17from ibllib.io.extractors.ephys_fpga import get_sync_fronts, get_sync_and_chn_map 

18import ibllib.io.raw_data_loaders as raw 

19from ibllib.io.extractors.base import ( 

20 BaseBpodTrialsExtractor, 

21 BaseExtractor, 

22 run_extractor_classes, 

23 _get_task_types_json_config 

24) 

25 

26_logger = logging.getLogger(__name__) 

27 

28 

29def extract_camera_sync(sync, chmap=None): 

30 """ 

31 Extract camera timestamps from the sync matrix 

32 

33 :param sync: dictionary 'times', 'polarities' of fronts detected on sync trace 

34 :param chmap: dictionary containing channel indices. Default to constant. 

35 :return: dictionary containing camera timestamps 

36 """ 

37 assert chmap 1rupgxsefdq

38 times = {} 1rupgxsefdq

39 for k in filter(lambda x: x.endswith('_camera'), chmap): 1rupgxsefdq

40 label, _ = k.rsplit('_', 1) 1rupgxsefdq

41 times[label] = get_sync_fronts(sync, chmap[k]).times[::2] 1rupgxsefdq

42 return times 1rupgxsefdq
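

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how `extract_camera_sync` is typically fed, mirroring the nidq branch
# of `extract_all` below. The 'raw_ephys_data' collection name and the camera labels in the
# comment are assumptions for a standard three-camera ephys session.
def _example_extract_camera_sync(session_path):
    """Return a dict of camera timestamps, e.g. {'left': ..., 'right': ..., 'body': ...}."""
    sync, chmap = get_sync_and_chn_map(session_path, 'raw_ephys_data')
    return extract_camera_sync(sync, chmap)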



def get_video_length(video_path):
    """
    Returns video length
    :param video_path: A path to the video
    :return: the video length (number of frames)
    """
    is_url = isinstance(video_path, str) and video_path.startswith('http')
    cap = VideoStreamer(video_path).cap if is_url else cv2.VideoCapture(str(video_path))
    assert cap.isOpened(), f'Failed to open video file {video_path}'
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    cap.release()
    return length
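

# --- Illustrative usage sketch (not part of the original module) ---
# `get_video_length` accepts either a local file path or an HTTP URL (streamed via
# VideoStreamer). The URL below is a hypothetical placeholder; `session_path` is assumed to
# be a pathlib.Path.
def _example_get_video_length(session_path):
    local = get_video_length(session_path.joinpath('raw_video_data', '_iblrig_leftCamera.raw.mp4'))
    remote = get_video_length('http://example.com/_iblrig_leftCamera.raw.mp4')
    return local, remote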



class CameraTimestampsFPGA(BaseExtractor):
    """
    Extractor for videos using DAQ sync and channel map.
    """

    def __init__(self, label, session_path=None):
        super().__init__(session_path)
        self.label = assert_valid_label(label)
        self.save_names = f'_ibl_{label}Camera.times.npy'
        self.var_names = f'{label}_camera_timestamps'
        self._log_level = _logger.level
        _logger.setLevel(logging.DEBUG)

    def __del__(self):
        _logger.setLevel(self._log_level)

    def _extract(self, sync=None, chmap=None, video_path=None, sync_label='audio',
                 display=False, extrapolate_missing=True, **kwargs):
        """
        The raw timestamps are taken from the DAQ. These are the times of the camera's frame TTLs.
        If the pin state file exists, these timestamps are aligned to the video frames using
        task TTLs (typically the audio TTLs). Frames missing from the embedded frame count are
        removed from the timestamps array.
        If the pin state file does not exist, the left and right camera timestamps may be aligned
        using the wheel data.

        Parameters
        ----------
        sync : dict
            Dictionary 'times', 'polarities' of fronts detected on sync trace.
        chmap : dict
            Dictionary containing channel indices. Default to constant.
        video_path : str, pathlib.Path, int
            An optional path for fetching the number of frames. If None, the video is loaded from
            the session path. If an int is provided this is taken to be the total number of frames.
        sync_label : str
            The sync label of the channel that's wired to the GPIO for synchronising the times.
        display : bool
            If true, the TTL and GPIO fronts are plotted.
        extrapolate_missing : bool
            If true, any missing timestamps at the beginning and end of the session are
            extrapolated based on the median frame rate, otherwise they will be NaNs.
        **kwargs
            Extra keyword arguments (unused).

        Returns
        -------
        numpy.array
            The extracted camera timestamps.
        """
        fpga_times = extract_camera_sync(sync=sync, chmap=chmap)
        count, (*_, gpio) = raw.load_embedded_frame_data(self.session_path, self.label)
        raw_ts = fpga_times[self.label]

        if video_path is None:
            filename = f'_iblrig_{self.label}Camera.raw.mp4'
            video_path = self.session_path.joinpath('raw_video_data', filename)
        # Permit the video path to be the length for development and debugging purposes
        length = (video_path if isinstance(video_path, int) else get_video_length(video_path))
        _logger.debug(f'Number of video frames = {length}')

        if gpio is not None and gpio['indices'].size > 1 and sync_label is not None:
            _logger.info(f'Aligning to {sync_label} TTLs')
            # Extract sync TTLs
            ttl = get_sync_fronts(sync, chmap[sync_label])
            _, ts = raw.load_camera_ssv_times(self.session_path, self.label)
            try:
                """
                NB: Some of the sync TTLs occur very close together, and are therefore not
                reflected in the pin state. This function removes those. Also converts frame
                times to DAQ time.
                """
                gpio, ttl, ts = groom_pin_state(gpio, ttl, ts, display=display)
                """
                The length of the count and pin state are regularly longer than the length of
                the video file. Here we assert that the video is either shorter or the same
                length as the arrays, and we make an assumption that the missing frames are
                right at the end of the video. We therefore simply shorten the arrays to match
                the length of the video.
                """
                if count.size > length:
                    count = count[:length]
                else:
                    assert length == count.size, 'fewer counts than frames'
                assert raw_ts.shape[0] > 0, 'no timestamps found in channel indicated for ' \
                                            f'{self.label} camera'
                return align_with_gpio(raw_ts, ttl, gpio, count,
                                       display=display,
                                       extrapolate_missing=extrapolate_missing)
            except AssertionError as ex:
                _logger.critical('Failed to extract using %s: %s', sync_label, ex)

        # If you reach here extracting using sync TTLs was not possible
        _logger.warning('Alignment by wheel data not yet implemented')
        if length < raw_ts.size:
            df = raw_ts.size - length
            _logger.info(f'Discarding first {df} pulses')
            raw_ts = raw_ts[df:]
        return raw_ts
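

# --- Illustrative usage sketch (not part of the original module) ---
# Running the FPGA camera extractor for each label through `run_extractor_classes`, mirroring
# the nidq branch of `extract_all` below. The collection name and labels are assumptions for a
# standard three-camera ephys session.
def _example_run_fpga_camera_extractors(session_path):
    sync, chmap = get_sync_and_chn_map(session_path, 'raw_ephys_data')
    extractor = [partial(CameraTimestampsFPGA, label) for label in ('left', 'right', 'body')]
    outputs, files = run_extractor_classes(
        extractor, session_path=session_path, save=False, sync=sync, chmap=chmap)
    return outputs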



class CameraTimestampsCamlog(BaseExtractor):
    def __init__(self, label, session_path=None):
        super().__init__(session_path)
        self.label = assert_valid_label(label)
        self.save_names = f'_ibl_{label}Camera.times.npy'
        self.var_names = f'{label}_camera_timestamps'
        self._log_level = _logger.level
        _logger.setLevel(logging.DEBUG)

    def __del__(self):
        _logger.setLevel(self._log_level)

    def _extract(self, sync=None, chmap=None, video_path=None,
                 display=False, extrapolate_missing=True, **kwargs):

        fpga_times = extract_camera_sync(sync=sync, chmap=chmap)
        video_frames = get_video_length(self.session_path.joinpath('raw_video_data', f'_iblrig_{self.label}Camera.raw.mp4'))
        raw_ts = fpga_times[self.label]

        # For the left camera there is sometimes one more sync pulse than there are video frames
        if (raw_ts.size - video_frames) == 1:
            _logger.warning(f'One extra sync pulse detected for {self.label} camera')  # not covered by tests
            raw_ts = raw_ts[:-1]  # not covered by tests
        elif (raw_ts.size - video_frames) == -1:
            _logger.warning(f'One extra video frame detected for {self.label} camera')  # not covered by tests
            med_time = np.median(np.diff(raw_ts))  # not covered by tests
            raw_ts = np.r_[raw_ts, np.array([raw_ts[-1] + med_time])]  # not covered by tests

        assert video_frames == raw_ts.size, f'dimension mismatch between video frames and TTL pulses for {self.label} camera ' \
                                            f'by {np.abs(video_frames - raw_ts.size)} frames'

        return raw_ts


class CameraTimestampsBpod(BaseBpodTrialsExtractor):
    """
    Get the camera timestamps from the Bpod

    The camera frame events are logged by the Bpod only during trials, not in between, so the
    missing times need to be interpolated
    """
    save_names = '_ibl_leftCamera.times.npy'
    var_names = 'left_camera_timestamps'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._log_level = _logger.level
        _logger.setLevel(logging.DEBUG)

    def __del__(self):
        _logger.setLevel(self._log_level)

    def _extract(self, video_path=None, display=False, extrapolate_missing=True, **kwargs):
        """
        The raw timestamps are taken from the Bpod. These are the times of the camera's frame TTLs.
        If the pin state file exists, these timestamps are aligned to the video frames using the
        sync TTLs. Frames missing from the embedded frame count are removed from the timestamps
        array.
        If the pin state file does not exist, the left camera timestamps may be aligned using the
        wheel data.
        :param video_path: an optional path for fetching the number of frames. If None,
        the video is loaded from the session path. If an int is provided this is taken to be
        the total number of frames.
        :param display: if True, the TTL and GPIO fronts are plotted.
        :param extrapolate_missing: if True, any missing timestamps at the beginning and end of
        the session are extrapolated based on the median frame rate, otherwise they will be NaNs.
        :return: a numpy array of camera timestamps
        """
        raw_ts = self._times_from_bpod()
        count, (*_, gpio) = raw.load_embedded_frame_data(self.session_path, 'left')
        if video_path is None:
            filename = '_iblrig_leftCamera.raw.mp4'
            video_path = self.session_path.joinpath('raw_video_data', filename)
        # Permit the video path to be the length for development and debugging purposes
        length = video_path if isinstance(video_path, int) else get_video_length(video_path)
        _logger.debug(f'Number of video frames = {length}')

        # Check if the GPIO is usable for extraction. GPIO is None if the file does not exist,
        # is empty, or contains only one value (i.e. doesn't change)
        if gpio is not None and gpio['indices'].size > 1:
            _logger.info('Aligning to sync TTLs')
            # Extract audio TTLs
            _, audio = raw.load_bpod_fronts(self.session_path, data=self.bpod_trials,
                                            task_collection=self.task_collection)
            _, ts = raw.load_camera_ssv_times(self.session_path, 'left')
            """
            There are many sync TTLs that are for some reason missed by the GPIO. Conversely
            the last GPIO doesn't often correspond to any audio TTL. These will be removed.
            The drift appears to be less severe than the DAQ, so when assigning TTLs we'll take
            the nearest TTL within 500ms. The go cue TTLs comprise two short pulses ~3ms apart.
            We will fuse any TTLs less than 5ms apart to make assignment more accurate.
            """
            try:
                gpio, audio, ts = groom_pin_state(gpio, audio, ts, take='nearest',
                                                  tolerance=.5, min_diff=5e-3, display=display)
                if count.size > length:
                    count = count[:length]
                else:
                    assert length == count.size, 'fewer counts than frames'

                return align_with_gpio(raw_ts, audio, gpio, count,
                                       extrapolate_missing, display=display)
            except AssertionError as ex:
                _logger.critical('Failed to extract using audio: %s', ex)

        # If you reach here extracting using audio TTLs was not possible
        _logger.warning('Alignment by wheel data not yet implemented')
        # Extrapolate at median frame rate
        n_missing = length - raw_ts.size
        if n_missing > 0:
            _logger.warning(f'{n_missing} fewer Bpod timestamps than frames; '
                            f'{"extrapolating" if extrapolate_missing else "appending nans"}')
            frate = np.median(np.diff(raw_ts))
            to_app = ((np.arange(n_missing, ) + 1) / frate + raw_ts[-1]
                      if extrapolate_missing
                      else np.full(n_missing, np.nan))
            raw_ts = np.r_[raw_ts, to_app]  # Append the missing times
        elif n_missing < 0:
            _logger.warning(f'{abs(n_missing)} fewer frames than Bpod timestamps')
            _logger.info(f'Discarding first {abs(n_missing)} pulses')
            raw_ts = raw_ts[abs(n_missing):]

        return raw_ts

    def _times_from_bpod(self):
        ntrials = len(self.bpod_trials)

        cam_times = []
        n_frames = 0
        n_out_of_sync = 0
        missed_trials = []
        for ind in np.arange(ntrials):
            # get upgoing and downgoing fronts
            pin = np.array(self.bpod_trials[ind]['behavior_data']
                           ['Events timestamps'].get('Port1In'))
            pout = np.array(self.bpod_trials[ind]['behavior_data']
                            ['Events timestamps'].get('Port1Out'))
            # some trials at startup may not have the camera working, discard
            if np.all(pin) is None:
                missed_trials.append(ind)
                continue
            # if the trial starts in the middle of a square, discard the first downgoing front
            if pout[0] < pin[0]:
                pout = pout[1:]
            # same if the last sample is during an upgoing front,
            # always put size as it happens last
            pin = pin[:pout.size]
            frate = np.median(np.diff(pin))
            if ind > 0:
                """
                assert that the pulses have the same length and that we don't miss frames during
                the trial, the refresh rate of bpod is 100us
                """
                test1 = np.all(np.abs(1 - (pin - pout) / np.median(pin - pout)) < 0.1)
                test2 = np.all(np.abs(np.diff(pin) - frate) <= 0.00011)
                if not all([test1, test2]):
                    n_out_of_sync += 1
            # grow a list of cam times for each trial
            cam_times.append(pin)
            n_frames += pin.size

        if missed_trials:
            _logger.debug('trial(s) %s missing TTL events', range_str(missed_trials))
        if n_out_of_sync > 0:
            _logger.warning(f"{n_out_of_sync} trials with bpod camera frame times not within"
                            f" 10% of the expected sampling rate")

        t_first_frame = np.array([c[0] for c in cam_times])
        t_last_frame = np.array([c[-1] for c in cam_times])
        frate = 1 / np.nanmedian(np.array([np.median(np.diff(c)) for c in cam_times]))
        intertrial_duration = t_first_frame[1:] - t_last_frame[:-1]
        intertrial_missed_frames = np.int32(np.round(intertrial_duration * frate)) - 1

        # initialize the full times array
        frame_times = np.zeros(n_frames + int(np.sum(intertrial_missed_frames)))
        ii = 0
        for trial, cam_time in enumerate(cam_times):
            if cam_time is not None:
                # populate first the recovered times within the trials
                frame_times[ii: ii + cam_time.size] = cam_time
                ii += cam_time.size
            if trial == (len(cam_times) - 1):
                break
            # then extrapolate in-between
            nmiss = intertrial_missed_frames[trial]
            frame_times[ii: ii + nmiss] = (cam_time[-1] + intertrial_duration[trial] /
                                           (nmiss + 1) * (np.arange(nmiss) + 1))
            ii += nmiss
        assert all(np.diff(frame_times) > 0)  # negative diffs imply a big problem
        return frame_times
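

# --- Illustrative usage sketch (not part of the original module) ---
# The Bpod extractor is normally invoked through `extract_all` below (bpod sync); this sketch
# simply mirrors that call path. The session path is a placeholder.
def _example_bpod_camera_extraction(session_path):
    outputs, files = run_extractor_classes(
        CameraTimestampsBpod, session_path=session_path, save=False)
    return outputs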



def align_with_gpio(timestamps, ttl, pin_state, count, extrapolate_missing=True, display=False):
    """
    Groom the raw DAQ or Bpod camera timestamps using the frame embedded GPIO and frame counter.

    Parameters
    ----------
    timestamps : numpy.array
        An array of raw DAQ or Bpod camera timestamps.
    ttl : dict
        A dictionary of DAQ sync TTLs, with keys {'times', 'polarities'}.
    pin_state : dict
        A dictionary containing GPIO pin state values, with keys {'indices', 'polarities'}.
    count : numpy.array
        An array of frame numbers.
    extrapolate_missing : bool
        If true and the number of timestamps is fewer than the number of frame counts, the
        remaining timestamps are extrapolated based on the frame rate, otherwise they are NaNs.
    display : bool
        Plot the resulting timestamps.

    Returns
    -------
    numpy.array
        The corrected frame timestamps.
    """
    # Some assertions made on the raw data
    # assert count.size == pin_state.size, 'frame count and pin state size mismatch'
    assert all(np.diff(count) > 0), 'frame count not strictly increasing'
    assert all(np.diff(timestamps) > 0), 'DAQ/Bpod camera times not strictly increasing'
    same_n_ttl = pin_state['times'].size == ttl['times'].size
    assert same_n_ttl, 'more TTLs detected on camera than TTLs sent'

    """Here we will ensure that the DAQ camera times match the number of video frames in
    length. We will make the following assumptions:

    1. The number of DAQ camera times is equal to or greater than the number of video frames.
    2. No TTLs were missed between the camera and DAQ.
    3. No pin states were missed by Bonsai.
    4. No pixel count data was missed by Bonsai.

    In other words the count and pin state arrays accurately reflect the number of frames
    sent by the camera and should therefore be the same length, and the length of the frame
    counter should match the number of saved video frames.

    The missing frame timestamps are removed in three stages:

    1. Remove any timestamps that occurred before video frame acquisition in Bonsai.
    2. Remove any timestamps where the frame counter reported missing frames, i.e. remove the
    dropped frames which occurred throughout the session.
    3. Remove the trailing timestamps at the end of the session if the camera was turned off
    in the wrong order.
    """
    # Align on first pin state change
    first_uptick = pin_state['indices'][0]
    first_ttl = np.searchsorted(timestamps, ttl['times'][0])
    """Here we find up to which index in the DAQ times we discard by taking the difference
    between the index of the first pin state change (when the sync TTL was reported by the
    camera) and the index of the first sync TTL in DAQ time. We subtract the difference
    between the frame count at the first pin state change and the index to account for any
    video frames that were not saved during this period (we will remove those from the
    camera DAQ times later).
    """
    # Minus any frames that were dropped between the start of frame acquisition and the first TTL
    start = first_ttl - first_uptick - (count[first_uptick] - first_uptick)
    # Get approximate frame rate for extrapolating timestamps (if required)
    frate = round(1 / np.nanmedian(np.diff(timestamps)))

    if start < 0:
        n_missing = abs(start)
        _logger.warning(f'{n_missing} missing DAQ/Bpod timestamp(s) at start; '
                        f'{"extrapolating" if extrapolate_missing else "prepending nans"}')
        to_app = (timestamps[0] - (np.arange(n_missing, 0, -1) + 1) / frate
                  if extrapolate_missing
                  else np.full(n_missing, np.nan))
        timestamps = np.r_[to_app, timestamps]  # Prepend the missing times
        start = 0

    # Remove the extraneous timestamps from the beginning and end
    end = count[-1] + 1 + start
    ts = timestamps[start:end]
    if (n_missing := count[-1] - ts.size + 1) > 0:
        """
        For ephys sessions there may be fewer DAQ times than frame counts if DAQ acquisition is
        turned off before the video acquisition workflow. For Bpod this always occurs because Bpod
        finishes before the camera workflow. For Bpod the times are already extrapolated for
        these late frames."""
        _logger.warning(f'{n_missing} fewer DAQ/Bpod timestamps than frame counts; '
                        f'{"extrapolating" if extrapolate_missing else "appending nans"}')
        to_app = ((np.arange(n_missing, ) + 1) / frate + ts[-1]
                  if extrapolate_missing
                  else np.full(n_missing, np.nan))
        ts = np.r_[ts, to_app]  # Append the missing times
    assert ts.size >= count.size, 'fewer timestamps than frame counts'
    assert ts.size == count[-1] + 1, 'more frames recorded in frame count than timestamps'

    # Remove the rest of the dropped frames
    ts = ts[count]
    assert np.searchsorted(ts, ttl['times'][0]) == first_uptick, \
        'time of first sync TTL doesn\'t match after alignment'
    if ts.size != count.size:
        _logger.error('number of timestamps and frames don\'t match after alignment')  # not covered by tests

    if display:
        # Plot to check
        fig, axes = plt.subplots(1, 1)
        y = within_ranges(np.arange(ts.size), pin_state['indices'].reshape(-1, 2)).astype(float)
        y *= 1e-5  # For scale when zoomed in
        axes.plot(ts, y, marker='d', color='blue', drawstyle='steps-pre', label='GPIO')
        axes.plot(ts, np.zeros_like(ts), 'kx', label='DAQ timestamps')
        vertical_lines(ttl['times'], ymin=0, ymax=1e-5,
                       color='r', linestyle=':', ax=axes, label='sync TTL')
        plt.legend()

    return ts
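

# --- Illustrative worked example (not part of the original module) ---
# A minimal synthetic sketch of `align_with_gpio` removing a dropped frame. The numbers are
# made up: 100 frame TTLs at 20 fps, frame 50 dropped before saving, and one sync pulse seen
# by both the GPIO (saved-frame indices 10 and 60) and the DAQ. Only the size of
# pin_state['times'] is checked here, so zeros stand in for the aligned GPIO times that
# `groom_pin_state` would normally provide.
def _example_align_with_gpio():
    timestamps = np.arange(100) / 20.      # DAQ time of each frame TTL sent by the camera
    count = np.delete(np.arange(100), 50)  # embedded frame counter: frame 50 never saved
    pin_state = {'indices': np.array([10, 60]), 'polarities': np.array([1, -1]),
                 'times': np.zeros(2)}
    # The sync TTL fronts land on camera frames count[10] == 10 and count[60] == 61
    ttl = {'times': timestamps[count[[10, 60]]], 'polarities': np.array([1, -1])}
    ts = align_with_gpio(timestamps, ttl, pin_state, count)
    assert ts.size == count.size                  # one timestamp per saved video frame
    assert np.array_equal(ts, timestamps[count])  # the dropped frame's timestamp was removed
    return ts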



def attribute_times(arr, events, tol=.1, injective=True, take='first'):
    """
    Returns the values of the first array that correspond to those of the second.

    Given two arrays of timestamps, the function will return the values of the first array
    that most likely correspond to the values of the second. For each of the values in the
    second array, the absolute difference is taken and the index of either the first sufficiently
    close value, or simply the closest one, is assigned.

    If injective is True, once a value has been assigned to an event it can't be assigned to
    another. In other words there is a one-to-one mapping between the two arrays.

    Parameters
    ----------
    arr : numpy.array
        An array of event times to attribute to those in `events`.
    events : numpy.array
        An array of event times considered a subset of `arr`.
    tol : float
        The max absolute difference between values in order to be considered a match.
    injective : bool
        If true, once a value has been assigned it will not be assigned again.
    take : {'first', 'nearest', 'after'}
        If 'first' the first value within tolerance is assigned; if 'nearest' the
        closest value is assigned; if 'after' assign the first event after.

    Returns
    -------
    numpy.array
        An array the same length as `events`.
    """
    if (take := take.lower()) not in ('first', 'nearest', 'after'):
        raise ValueError('Parameter `take` must be either "first", "nearest", or "after"')
    stack = np.ma.masked_invalid(arr, copy=False)
    stack.fill_value = np.inf
    assigned = np.full(events.shape, -1, dtype=int)  # Initialize output array
    min_tol = 0 if take == 'after' else -tol
    for i, x in enumerate(events):
        dx = stack.filled() - x
        candidates = np.logical_and(min_tol < dx, dx < tol)
        if any(candidates):  # is any value within tolerance
            idx = np.abs(dx).argmin() if take == 'nearest' else np.where(candidates)[0][0]
            assigned[i] = idx
            stack.mask[idx] = injective  # If one-to-one, remove the assigned value
    return assigned
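

# --- Illustrative worked example (not part of the original module) ---
# Small synthetic arrays showing how `attribute_times` assigns indices, how the injective
# behaviour prevents double assignment, and how 'first' and 'nearest' differ. All numbers are
# made up for the example.
def _example_attribute_times():
    arr = np.array([0., 1.01, 2., 3.])
    events = np.array([1., 1.02, 5.])
    # arr[1] is within 0.1 of both the first and second event, but injective=True means it can
    # only be assigned once; the third event has no match at all.
    assert np.array_equal(attribute_times(arr, events, tol=.1), [1, -1, -1])
    # 'first' takes the first value within tolerance, 'nearest' takes the closest one
    arr = np.array([0., 1.0, 1.08])
    assert attribute_times(arr, np.array([1.05]), tol=.1, take='first')[0] == 1
    assert attribute_times(arr, np.array([1.05]), tol=.1, take='nearest')[0] == 2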



def groom_pin_state(gpio, ttl, ts, tolerance=2., display=False, take='first', min_diff=0.):
    """
    Align the GPIO pin state to the DAQ sync TTLs. Any sync TTLs not reflected in the pin
    state are removed from the dict and the times of the detected fronts are converted to DAQ
    time. At the end of this the number of GPIO fronts should equal the number of TTLs.

    Note:
      - This function is ultra safe: we probably don't need to assign all the up and down fronts
      separately and could potentially even align the timestamps without removing the missed fronts.
      - The input gpio and TTL dicts may be modified by this function.
      - For training sessions the frame rate is only 30Hz and the TTLs tend to be broken up by
      small gaps. Setting the min_diff to 5ms helps the timestamp assignment accuracy.

    Parameters
    ----------
    gpio : dict
        A dictionary containing GPIO pin state values, with keys {'indices', 'polarities'}.
    ttl : dict
        A dictionary of DAQ sync TTLs, with keys {'times', 'polarities'}.
    ts : numpy.array
        The camera frame times (the camera frame TTLs acquired by the main DAQ).
    tolerance : float
        Two pulses need to be within this many seconds to be considered related.
    display : bool
        If true, the resulting timestamps are plotted against the raw audio signal.
    take : {'first', 'nearest'}
        If 'first' the first value within tolerance is assigned; if 'nearest' the
        closest value is assigned.
    min_diff : float
        Sync TTL fronts less than min_diff seconds apart will be removed.

    Returns
    -------
    dict
        Dictionary of GPIO DAQ front indices, polarities and DAQ aligned times.
    dict
        Sync TTL times and polarities sans the TTLs not detected in the frame data.
    numpy.array
        Frame times in DAQ time.

    See Also
    --------
    ibllib.io.extractors.ephys_fpga._get_sync_fronts
    """
    # Check that the dimensions match
    if np.any(gpio['indices'] >= ts.size):
        _logger.warning('GPIO events occurring beyond timestamps array length')
        keep = gpio['indices'] < ts.size
        gpio = {k: gpio[k][keep] for k, v in gpio.items()}
    assert ttl and ttl['times'].size > 0, 'no sync TTLs for session'
    assert ttl['times'].size == ttl['polarities'].size, 'sync TTL data dimension mismatch'
    # make sure that there are no 2 consecutive fall or consecutive rise events
    assert np.all(np.abs(np.diff(ttl['polarities'])) == 2), 'consecutive high/low sync TTL events'
    # make sure first TTL is high
    assert ttl['polarities'][0] == 1
    # make sure ttl times in order
    assert np.all(np.diff(ttl['times']) > 0)
    # make sure raw timestamps increase
    assert np.all(np.diff(ts) > 0), 'timestamps must strictly increase'
    # make sure there are state changes
    assert gpio['indices'].any(), 'no TTLs detected in GPIO'
    # make sure first GPIO state is high
    assert gpio['polarities'][0] == 1
    """
    Some sync TTLs appear to be so short that they are not recorded by the camera. These can
    be as short as a few microseconds. Applying a cutoff based on framerate was unsuccessful.
    Assigning each sync TTL to each pin state change is not easy because some onsets occur very
    close together (sometimes < 70ms), on the order of the delay between TTL and frame time.
    Also, the two clocks have some degree of drift, so the delay between sync TTL and pin state
    change may be zero or even negative.

    Here we split the events into sync TTL onsets (lo->hi) and TTL offsets (hi->lo). For each
    uptick in the GPIO pin state, we take the first TTL onset time that was within 100ms of it.
    We ensure that each sync TTL is assigned only once, so a TTL that is closer to frame 3 than
    frame 1 may still be assigned to frame 1.
    """
    ifronts = gpio['indices']  # The pin state flips
    sync_times = ttl['times']
    if ifronts.size != ttl['times'].size:
        _logger.warning('more sync TTLs than GPIO state changes, assigning timestamps')
        to_remove = np.zeros(ifronts.size, dtype=bool)  # unassigned GPIO fronts to remove
        low2high = ifronts[gpio['polarities'] == 1]
        high2low = ifronts[gpio['polarities'] == -1]
        assert low2high.size >= high2low.size

        # Remove and/or fuse short TTLs
        if min_diff > 0:
            short, = np.where(np.diff(ttl['times']) < min_diff)
            sync_times = np.delete(ttl['times'], np.r_[short, short + 1])
            _logger.debug(f'Removed {short.size * 2} front TTLs less than '
                          f'{min_diff * 1e3:.0f}ms apart')
            assert sync_times.size > 0, f'all sync TTLs less than {min_diff}s'

        # Onsets
        ups = ts[low2high] - ts[low2high][0]  # times relative to first GPIO high
        onsets = sync_times[::2] - sync_times[0]  # TTL times relative to first onset
        # assign GPIO fronts to ttl onset
        assigned = attribute_times(onsets, ups, tol=tolerance, take=take)
        unassigned = np.setdiff1d(np.arange(onsets.size), assigned[assigned > -1])
        if unassigned.size > 0:
            _logger.debug(f'{unassigned.size} sync TTL rises were not detected by the camera')
        # Check that all pin state upticks could be attributed to an onset TTL
        if np.any(missed := assigned == -1):
            _logger.warning(f'{sum(missed)} pin state rises could not be attributed to a sync TTL')
            if display:
                ax = plt.subplot()
                vertical_lines(ups[assigned > -1],
                               linestyle='-', color='g', ax=ax,
                               label='assigned GPIO up state')
                vertical_lines(ups[missed],
                               linestyle='-', color='r', ax=ax,
                               label='unassigned GPIO up state')
                vertical_lines(onsets[unassigned],
                               linestyle=':', color='k', ax=ax,
                               alpha=0.3, label='sync TTL onset')
                vertical_lines(onsets[assigned],
                               linestyle=':', color='b', ax=ax, label='assigned TTL onset')
                plt.legend()
                plt.show()
            # Remove the missed fronts
            to_remove = np.in1d(gpio['indices'], low2high[missed])
            assigned = assigned[~missed]
        onsets_ = sync_times[::2][assigned]

        # Offsets
        downs = ts[high2low] - ts[high2low][0]
        offsets = sync_times[1::2] - sync_times[1]
        assigned = attribute_times(offsets, downs, tol=tolerance, take=take)
        unassigned = np.setdiff1d(np.arange(offsets.size), assigned[assigned > -1])
        if unassigned.size > 0:
            _logger.debug(f'{unassigned.size} sync TTL falls were not detected by the camera')
        # Check that all pin state downticks could be attributed to an offset TTL
        if np.any(missed := assigned == -1):
            _logger.warning(f'{sum(missed)} pin state falls could not be attributed to a sync TTL')
            # Remove the missed fronts
            to_remove |= np.in1d(gpio['indices'], high2low[missed])
            assigned = assigned[~missed]
        offsets_ = sync_times[1::2][assigned]

        # Sync TTLs groomed
        if np.any(to_remove):
            # Check for any orphaned fronts (only one pin state edge was assigned)
            to_remove = np.pad(to_remove, (0, to_remove.size % 2), 'edge')  # Ensure even size
            # Perform xor to find GPIOs where only onset or offset is marked for removal
            orphaned = to_remove.reshape(-1, 2).sum(axis=1) == 1
            if orphaned.any():
                """If there are orphaned GPIO fronts (i.e. only one edge was assigned to a sync
                TTL front), remove the orphaned front and its assigned sync TTL. In other words
                if both edges cannot be assigned to a sync TTL, we ignore the TTL entirely.
                This is a sign that the assignment was bad and extraction may fail."""
                _logger.warning('Some onsets but not offsets (or vice versa) were not assigned; '
                                'this may be a sign of faulty wiring or clock drift')
                # Find indices of GPIO upticks where only the downtick was marked for removal
                orphaned_onsets, = np.where(~to_remove.reshape(-1, 2)[:, 0] & orphaned)
                # The onsets_ array already has the other TTLs removed (same size as to_remove ==
                # False) so subtract the number of removed elements from index.
                for i, v in enumerate(orphaned_onsets):
                    orphaned_onsets[i] -= to_remove.reshape(-1, 2)[:v, 0].sum()
                # Same for offsets...
                orphaned_offsets, = np.where(~to_remove.reshape(-1, 2)[:, 1] & orphaned)
                for i, v in enumerate(orphaned_offsets):
                    orphaned_offsets[i] -= to_remove.reshape(-1, 2)[:v, 1].sum()
                # Remove orphaned ttl onsets and offsets
                onsets_ = np.delete(onsets_, orphaned_onsets[orphaned_onsets < onsets_.size])
                offsets_ = np.delete(offsets_, orphaned_offsets[orphaned_offsets < offsets_.size])
                _logger.debug(f'{orphaned.sum()} orphaned TTLs removed')
                to_remove.reshape(-1, 2)[orphaned] = True

            # Remove those unassigned GPIOs
            gpio = {k: v[~to_remove[:v.size]] for k, v in gpio.items()}
            ifronts = gpio['indices']

            # Assert that we've removed discrete TTLs
            # A failure means e.g. an up-going front of one TTL was missed but not the down-going one.
            assert np.all(np.abs(np.diff(gpio['polarities'])) == 2)
            assert gpio['polarities'][0] == 1

        ttl_ = {'times': np.empty(ifronts.size), 'polarities': gpio['polarities']}
        ttl_['times'][::2] = onsets_
        ttl_['times'][1::2] = offsets_
    else:
        ttl_ = ttl.copy()

    # Align the frame times to DAQ
    fcn_a2b, drift_ppm = dsp.sync_timestamps(ts[ifronts], ttl_['times'])
    _logger.debug(f'frame ttl alignment drift = {drift_ppm:.2f}ppm')
    # Add times to GPIO dict
    gpio['times'] = fcn_a2b(ts[ifronts])

    if display:
        # Plot all the onsets and offsets
        ax = plt.subplot()
        # All sync TTLs
        squares(ttl['times'], ttl['polarities'],
                ax=ax, label='sync TTLs', linestyle=':', color='k', yrange=[0, 1], alpha=0.3)
        # GPIO
        x = np.insert(gpio['times'], 0, 0)
        y = np.arange(x.size) % 2
        squares(x, y, ax=ax, label='GPIO')
        y = within_ranges(np.arange(ts.size), ifronts.reshape(-1, 2))  # 0 or 1 for each frame
        ax.plot(fcn_a2b(ts), y, 'kx', label='cam times')
        # Assigned ttl
        squares(ttl_['times'], ttl_['polarities'],
                ax=ax, label='assigned sync TTL', linestyle=':', color='g', yrange=[0, 1])
        ax.legend()
        plt.xlabel('DAQ time (s)')
        ax.set_yticks([0, 1])
        ax.set_title('GPIO - sync TTL alignment')
        plt.show()

    return gpio, ttl_, fcn_a2b(ts)



def extract_all(session_path, sync_type=None, save=True, **kwargs):
    """
    Extract the camera frame timestamps (video time stamps) for a session, using either the
    DAQ sync pulses or the Bpod data depending on the sync type.

    Parameters
    ----------
    session_path : str, pathlib.Path
        The session path, e.g. '/path/to/subject/yyyy-mm-dd/001'.
    sync_type : str
        The sync label from the experiment description file. If None, the sync type is inferred
        from the session type in the settings file, i.e. 'ephys', 'training' or 'biased'.
    sync_collection : str
        The subdirectory containing the sync files.
    save : bool
        If True, save the camera timestamp files to disk.
    session_type : str
        (DEPRECATED) The session type, e.g. 'ephys'.
    **kwargs
        Extra keyword args to pass to the camera extractor classes.

    Returns
    -------
    list of numpy.array
        List of extracted output data, i.e. the camera times.
    list of pathlib.Path
        The paths of the extracted data, if save = True
    """

    sync_collection = kwargs.get('sync_collection', 'raw_ephys_data')
    camlog = kwargs.get('camlog', False)

    if not sync_type:  # infer from session type
        session_type = kwargs.get('session_type') or get_session_extractor_type(session_path)
        if not session_type or session_type not in _get_task_types_json_config().values():
            raise ValueError(f"Session type {session_type} has no matching extractor")  # not covered by tests
        else:
            sync_type = 'nidq' if session_type == 'ephys' else 'bpod'

    if sync_type == 'nidq':
        labels = assert_valid_label(kwargs.pop('labels', ('left', 'right', 'body')))
        labels = (labels,) if isinstance(labels, str) else labels  # Ensure list/tuple
        CamExtractor = CameraTimestampsCamlog if camlog else CameraTimestampsFPGA
        extractor = [partial(CamExtractor, label) for label in labels]
        if 'sync' not in kwargs:
            kwargs['sync'], kwargs['chmap'] = get_sync_and_chn_map(session_path, sync_collection)
    else:  # assume Bpod otherwise
        assert kwargs.pop('labels', 'left'), 'only left camera is currently supported'
        extractor = CameraTimestampsBpod

    outputs, files = run_extractor_classes(
        extractor, session_path=session_path, save=save, **kwargs)
    return outputs, files
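

# --- Illustrative usage sketch (not part of the original module) ---
# Typical top-level call; the session path is a placeholder and the keyword values shown are
# the module defaults rather than requirements.
def _example_extract_all(session_path):
    # For an ephys session synced to the nidq DAQ; when sync_type is omitted it is inferred
    # from the session type in the settings file.
    outputs, files = extract_all(session_path, sync_type='nidq', save=False)
    return outputs, files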