Coverage for ibllib/io/extractors/ephys_passive.py: 71%

310 statements  

coverage.py v7.3.2, created at 2023-10-11 11:13 +0100

1#!/usr/bin/env python 

2# -*- coding:utf-8 -*- 

3# @Author: Niccolò Bonacchi 

4# @Date: Monday, September 7th 2020, 11:51:17 am 

5import json 

6import logging 

7from pathlib import Path 

8from typing import Tuple 

9 

10import matplotlib.pyplot as plt 

11import numpy as np 

12import pandas as pd 

13 

14import ibllib.io.raw_data_loaders as rawio 

15from ibllib.io.extractors import ephys_fpga 

16from ibllib.io.extractors.base import BaseExtractor 

17from ibllib.io.extractors.passive_plotting import ( 

18 plot_audio_times, 

19 plot_gabor_times, 

20 plot_passive_periods, 

21 plot_rfmapping, 

22 plot_stims_times, 

23 plot_sync_channels, 

24 plot_valve_times, 

25) 

26 

27log = logging.getLogger("ibllib") 

28 

29# hardcoded var 

30 FRAME_FS = 60 # Sampling freq of the iPad screen, in Hertz 

31 FS_FPGA = 30000 # Sampling freq of the neural recording system, in Hertz 

32NVALVE = 40 # number of expected valve clicks 

33NGABOR = 20 + 20 * 4 * 2 # number of expected Gabor patches 

34NTONES = 40 

35NNOISES = 40 

36DEBUG_PLOTS = False 

37 

38dataset_types = [ 

39 "_spikeglx_sync.times", 

40 "_spikeglx_sync.channels", 

41 "_spikeglx_sync.polarities", 

42 "_iblrig_RFMapStim.raw", 

43 "_iblrig_stimPositionScreen.raw", 

44 "_iblrig_syncSquareUpdate.raw", 

45 "ephysData.raw.meta", 

46 "_iblrig_taskSettings.raw", 

47 "_iblrig_taskData.raw", 

48] 

49 

50min_dataset_types = [ 

51 "_spikeglx_sync.times", 

52 "_spikeglx_sync.channels", 

53 "_spikeglx_sync.polarities", 

54 "_iblrig_RFMapStim.raw", 

55 "ephysData.raw.meta", 

56 "_iblrig_taskSettings.raw", 

57 "_iblrig_taskData.raw", 

58] 

59 

60 

61# load session fixtures 

62def _load_passive_session_fixtures(session_path: str, task_collection: str = 'raw_passive_data') -> dict: 

63 """load_passive_session_fixtures Loads corresponding ephys session fixtures 

64 

65 :param session_path: the path to a session 

66 :type session_path: str 

67 :return: positions, contrasts, phases, stimulus delays and stimulus IDs 

68 :rtype: dict 

69 """ 

70 

71 # THIS CAN BE THE PREGENERATED SESSION NUMBER 

72 settings = rawio.load_settings(session_path, task_collection=task_collection) 1abcd

73 ses_nb = settings['PREGENERATED_SESSION_NUM'] 1abcd

74 session_order = settings.get('SESSION_ORDER', None) 1abcd

75 if session_order: # TODO test this out and make sure it's okay 1abcd

76 assert settings["SESSION_ORDER"][settings["SESSION_IDX"]] == ses_nb 1acd

77 

78 path_fixtures = Path(ephys_fpga.__file__).parent.joinpath("ephys_sessions") 1abcd

79 

80 fixture = { 1abcd

81 "pcs": np.load(path_fixtures.joinpath(f"session_{ses_nb}_passive_pcs.npy")), 

82 "delays": np.load(path_fixtures.joinpath(f"session_{ses_nb}_passive_stimDelays.npy")), 

83 "ids": np.load(path_fixtures.joinpath(f"session_{ses_nb}_passive_stimIDs.npy")), 

84 } 

85 

86 return fixture 1abcd

87 
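The fixture dictionary above feeds straight into the Gabor extraction further down. A minimal sketch of its expected contents follows, with made-up values; the `pcs` shape follows from how the fixture is appended to the Gabor intervals below, while the `delays`/`ids` lengths are assumptions for illustration only.

import numpy as np

NGABOR = 20 + 20 * 4 * 2  # 180, as defined at the top of this module

fixture_sketch = {
    # one (position, contrast, phase) row per Gabor presentation
    "pcs": np.zeros((NGABOR, 3)),
    # per-stimulus delays and identifiers, assumed 1-D (lengths are illustrative only)
    "delays": np.zeros(300),
    "ids": np.zeros(300, dtype=int),
}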

88 

89def _load_task_protocol(session_path: str, task_collection: str = 'raw_passive_data') -> str: 

90 """Find the IBL rig version used for the session 

91 

92 :param session_path: the path to a session 

93 :type session_path: str 

94 :return: ibl rig task protocol version 

95 :rtype: str 

96 """ 

97 settings = rawio.load_settings(session_path, task_collection=task_collection) 1abcd

98 ses_ver = settings["IBLRIG_VERSION_TAG"] 1abcd

99 

100 return ses_ver 1abcd

101 

102 

103def _load_passive_stim_meta() -> dict: 

104 """load_passive_stim_meta Loads the passive protocol metadata 

105 

106 :return: metadata about passive protocol stimulus presentation 

107 :rtype: dict 

108 """ 

109 path_fixtures = Path(ephys_fpga.__file__).parent.joinpath("ephys_sessions") 1habcdf

110 with open(path_fixtures.joinpath("passive_stim_meta.json"), "r") as f: 1habcdf

111 meta = json.load(f) 1habcdf

112 

113 return meta 1habcdf

114 
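A sketch of the metadata keys the rest of this module relies on; the values are made up, only the key names are taken from the code that consumes the dict below.

example_meta = {
    "VISUAL_STIM_0": {"ttl_frame_nums": [0, 3, 6, 12], "delay_around": 1.0},  # spacer definition
    "VISUAL_STIMULI": {"0": "spacer", "1": "receptive_field_mapping"},        # stim id -> name
    "STIM_ORDER": [0, 2, 0, 1, 0],                                            # 0 marks a spacer
}
# e.g. the expected spacer count used below is sum(s == 0 for s in example_meta["STIM_ORDER"])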

115 

116# 1/3 Define start and end times of the 3 passive periods 

117def _get_spacer_times(spacer_template, jitter, ttl_signal, t_quiet, thresh=3.0): 

118 """ 

119 Find timestamps of spacer signal. 

120 :param spacer_template: times (in seconds) at which the spacer ttl signal changes 

121 :type spacer_template: array-like 

122 :param jitter: jitter (in seconds) for matching ttl_signal with spacer_template 

123 :type jitter: float 

124 :param ttl_signal: 

125 :type ttl_signal: array-like 

126 :param t_quiet: seconds between spacer and next stim 

127 :type t_quiet: float 

128 :param thresh: threshold that the convolved fttl signal (correlated with the spacer template) must exceed to detect a spacer 

129 :type thresh: float 

130 :return: times of spacer onset/offset 

131 :rtype: n_spacer x 2 np.ndarray; first column onset times, second column offset times 

132 """ 

133 diff_spacer_template = np.diff(spacer_template) 1abcdf

134 # add jitter; 

135 # remove extreme values 

136 spacer_model = jitter + diff_spacer_template[2:-2] 1abcdf

137 # diff ttl signal to compare to spacer_model 

138 dttl = np.diff(ttl_signal) 1abcdf

139 # remove diffs larger than max diff in model to clean up signal 

140 dttl[dttl > np.max(spacer_model)] = 0 1abcdf

141 # convolve cleaned diff ttl signal w/ spacer model 

142 conv_dttl = np.correlate(dttl, spacer_model, mode="full") 1abcdf

143 # find spacer location 

144 idxs_spacer_middle = np.where( 1abcdf

145 (conv_dttl[1:-2] < thresh) & (conv_dttl[2:-1] > thresh) & (conv_dttl[3:] < thresh) 

146 )[0] 

147 # adjust indices for 

148 # - `np.where` call above 

149 # - length of spacer_model 

150 spacer_around = int((np.floor(len(spacer_model) / 2))) 1abcdf

151 idxs_spacer_middle += 2 - spacer_around 1abcdf

152 

153 # for each spacer make sure the times are monotonically non-decreasing before 

154 # and monotonically non-increasing afterwards 

155 is_valid = np.zeros((idxs_spacer_middle.size), dtype=bool) 1abcdf

156 for i, t in enumerate(idxs_spacer_middle): 1abcdf

157 before = all(np.diff(dttl[t - spacer_around:t]) >= 0) 1abcd

158 after = all(np.diff(dttl[t + 1:t + 1 + spacer_around]) <= 0) 1abcd

159 is_valid[i] = np.bitwise_and(before, after) 1abcd

160 

161 idxs_spacer_middle = idxs_spacer_middle[is_valid] 1abcdf

162 

163 # pull out spacer times (middle) 

164 ts_spacer_middle = ttl_signal[idxs_spacer_middle] 1abcdf

165 # put beginning/end of spacer times into an array 

166 spacer_length = np.max(spacer_template) 1abcdf

167 spacer_times = np.zeros(shape=(ts_spacer_middle.shape[0], 2)) 1abcdf

168 for i, t in enumerate(ts_spacer_middle): 1abcdf

169 spacer_times[i, 0] = t - (spacer_length / 2) - t_quiet 1abcd

170 spacer_times[i, 1] = t + (spacer_length / 2) + t_quiet 1abcd

171 return spacer_times, conv_dttl 1abcdf

172 
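A toy illustration of the cleaning and correlation steps above, on made-up gap values; it only shows where the template aligns best, not the full peak-detection and validity logic.

import numpy as np

spacer_model = np.array([0.15, 0.20, 0.15])            # assumed inter-flip gaps of the spacer (s)
dttl = np.array([5.0, 0.15, 0.20, 0.15, 8.0, 0.5])     # made-up frame2ttl gaps (s)
dttl[dttl > np.max(spacer_model)] = 0                  # gaps longer than any template gap cannot be spacer
conv_dttl = np.correlate(dttl, spacer_model, mode="full")
best_alignment = int(np.argmax(conv_dttl))             # peaks where the spacer pattern sits in dttl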

173 

174def _get_passive_spacers(session_path, sync_collection='raw_ephys_data', 

175 sync=None, sync_map=None, tmin=None, tmax=None): 

176 """ 

177 Load and get spacer information, using correlation to find spacer timestamps. 

178 Returns t_passive_starts, t_starts, t_ends. 

179 """ 

180 if sync is None or sync_map is None: 1abcdf

181 sync, sync_map = ephys_fpga.get_sync_and_chn_map(session_path, sync_collection=sync_collection) 

182 meta = _load_passive_stim_meta() 1abcdf

183 # t_end_ephys = passive.ephysCW_end(session_path=session_path) 

184 fttl = ephys_fpga.get_sync_fronts(sync, sync_map["frame2ttl"], tmin=tmin, tmax=tmax) 1abcdf

185 fttl = ephys_fpga._clean_frame2ttl(fttl, display=False) 1abcdf

186 spacer_template = ( 1abcdf

187 np.array(meta["VISUAL_STIM_0"]["ttl_frame_nums"], dtype=np.float32) / FRAME_FS 

188 ) 

189 jitter = 3 / FRAME_FS # allow for 3 screen refresh as jitter 1abcdf

190 t_quiet = meta["VISUAL_STIM_0"]["delay_around"] 1abcdf

191 spacer_times, conv_dttl = _get_spacer_times( 1abcdf

192 spacer_template=spacer_template, jitter=jitter, ttl_signal=fttl["times"], t_quiet=t_quiet 

193 ) 

194 

195 # Check correct number of spacers found 

196 n_exp_spacer = np.sum(np.array(meta['STIM_ORDER']) == 0) # Hardcoded 0 for spacer 1abcdf

197 if n_exp_spacer != np.size(spacer_times) / 2: 1abcdf

198 error_nspacer = True 1f

199 # sometimes the first spacer is truncated 

200 # assess whether the first spacer is undetected, and then launch another spacer detection on truncated fttl 

201 # with a lower threshold value 

202 # Note: take *3 for some margin 

203 if spacer_times[0][0] > (spacer_template[-1] + jitter) * 3 and (np.size(spacer_times) / 2) == n_exp_spacer - 1: 1f

204 # Truncate signals 

205 fttl_t = fttl["times"][np.where(fttl["times"] < spacer_times[0][0])] 

206 conv_dttl_t = conv_dttl[np.where(fttl["times"] < spacer_times[0][0])] 

207 ddttl = np.diff(np.diff(fttl_t)) 

208 # Find spacer location 

209 # NB: cannot re-use the same algo for spacer detection as conv peaks towards spacer end 

210 # 1. Find time point at which conv raises above a given threshold value 

211 thresh = 2.0 

212 idx_nearend_spacer = int(np.where((conv_dttl_t[1:-2] < thresh) & (conv_dttl_t[2:-1] > thresh))[0]) 

213 ddttl = ddttl[0:idx_nearend_spacer] 

214 # 2. Find time point before this, for which the fttl diff increases then decreases (this is the middle of the spacer) 

215 indx_middle = np.where((ddttl[0:-1] > 0) & (ddttl[1:] < 0))[0] 

216 if len(indx_middle) == 1: 

217 # 3. Add 1/2 spacer to middle idx to get the spacer end indx 

218 spacer_around = int((np.floor(len(spacer_template) / 2))) 

219 idx_end = int(indx_middle + spacer_around) + 1 

220 spacer_times = np.insert(spacer_times, 0, np.array([fttl["times"][0], fttl["times"][idx_end]]), axis=0) 

221 error_nspacer = False 

222 

223 if error_nspacer: 

224 raise ValueError( 

225 f'The number of expected spacers ({n_exp_spacer}) ' 

226 f'is different from the number found on the raw ' 

227 f'trace ({int(np.size(spacer_times) / 2)})' 

228 ) 

229 

230 if tmax is None: # TODO THIS NEEDS CHANGING AS FOR DYNAMIC PIPELINE F2TTL slower than valve 1abcd

231 tmax = fttl['times'][-1] 1abcd

232 

233 spacer_times = np.r_[spacer_times.flatten(), tmax] 1abcd

234 return spacer_times[0], spacer_times[1::2], spacer_times[2::2] 1abcd

235 
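The final flatten-and-slice step above is easy to misread; a worked example with made-up spacer on/off pairs (in seconds):

import numpy as np

spacer_times = np.array([[10.0, 12.0],     # spacer 1: onset, offset
                         [100.0, 102.0],   # spacer 2
                         [200.0, 202.0]])  # spacer 3
tmax = 300.0
flat = np.r_[spacer_times.flatten(), tmax]
t_start_passive = flat[0]   # 10.0  -> start of the whole passive protocol
t_starts = flat[1::2]       # [ 12., 102., 202.] -> each period starts when its spacer ends
t_ends = flat[2::2]         # [100., 200., 300.] -> each period ends at the next spacer onset (or tmax)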

236 

237# 2/3 RFMapping stimuli 

238def _interpolate_rf_mapping_stimulus(idxs_up, idxs_dn, times, Xq, t_bin): 

239 """ 

240 Interpolate stimulus presentation times to the screen refresh rate so they match the stimulus frames 

241 :param idxs_up: indices of rising TTL fronts (0 -> 1) 

242 :type idxs_up: array-like 

243 :param idxs_dn: indices of falling TTL fronts (1 -> 0) 

244 :type idxs_dn: array-like 

245 :param times: array of stimulus switch times 

246 :param Xq: query frame indices to interpolate times onto 

247 :param t_bin: period of screen refresh rate 

248 :type t_bin: float 

249 :return: interpolated stimulus presentation times, one per entry in Xq 

250 """ 

251 

252 beg_extrap_val = -10001 1gabcd

253 end_extrap_val = -10000 1gabcd

254 

255 X = np.sort(np.concatenate([idxs_up, idxs_dn])) 1gabcd

256 # make left and right extrapolations distinctive to easily find later 

257 Tq = np.interp(Xq, X, times, left=beg_extrap_val, right=end_extrap_val) 1gabcd

258 # uniform spacing outside boundaries of ttl signal 

259 # first values 

260 n_beg = len(np.where(Tq == beg_extrap_val)[0]) 1gabcd

261 if 0 < n_beg < Tq.shape[0]: 1gabcd

262 Tq[:n_beg] = times[0] - np.arange(n_beg, 0, -1) * t_bin 1abcd

263 # end values 

264 n_end = len(np.where(Tq == end_extrap_val)[0]) 1gabcd

265 if 0 < n_end < Tq.shape[0]: 1gabcd

266 Tq[-n_end:] = times[-1] + np.arange(1, n_end + 1) * t_bin 1gabcd

267 return Tq 1gabcd

268 
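A worked toy of the sentinel-based extrapolation above; the numbers are made up and the steps mirror the function body.

import numpy as np

times = np.array([1.0, 2.0, 3.0])   # detected switch times (s)
X = np.array([10, 20, 30])          # frame indices at which those switches occurred
Xq = np.array([0, 10, 20, 30, 40])  # frame indices we want times for
t_bin = 1 / 60                      # one screen refresh

Tq = np.interp(Xq, X, times, left=-10001, right=-10000)    # [-10001., 1., 2., 3., -10000.]
n_beg = int(np.sum(Tq == -10001))
Tq[:n_beg] = times[0] - np.arange(n_beg, 0, -1) * t_bin    # step back one refresh per missing frame
n_end = int(np.sum(Tq == -10000))
Tq[-n_end:] = times[-1] + np.arange(1, n_end + 1) * t_bin  # step forward one refresh per missing frame
# Tq is now roughly [0.983, 1.0, 2.0, 3.0, 3.017]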

269 

270def _get_id_raisefall_from_analogttl(ttl_01): 

271 """ 

272 Get indices of rise/fall from an analog continuous TTL signal (-1, 0, 1 values) 

273 :param ttl_01: analog continuous TTL signal (-1, 0, 1 values) 

274 :return: indices up (0 -> 1), indices down (0 -> -1), number of expected TTL transitions 

275 """ 

276 # Check values are 0, 1, -1 

277 if not np.all(np.isin(np.unique(ttl_01), [-1, 0, 1])): 1abcd

278 raise ValueError("Values in input must be 0, 1, -1") 

279 else: 

280 # Find transitions from 0 to 1 and from 0 to -1 

281 d_ttl_01 = np.diff(ttl_01) 1abcd

282 id_up = np.where(np.logical_and(ttl_01 == 0, np.append(d_ttl_01, 0) == 1))[0] 1abcd

283 id_dw = np.where(np.logical_and(ttl_01 == 0, np.append(d_ttl_01, 0) == -1))[0] 1abcd

284 n_ttl_expected = 2 * (len(id_up) + len(id_dw)) # *2 for rise/fall of ttl pulse 1abcd

285 return id_up, id_dw, n_ttl_expected 1abcd

286 
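A small usage sketch of the function above on a hand-made -1/0/1 trace (dark, grey, bright):

import numpy as np
from ibllib.io.extractors.ephys_passive import _get_id_raisefall_from_analogttl

ttl_01 = np.array([-1, -1, 0, 1, 1, 0, -1, -1, 0, 1])
id_up, id_dw, n_ttl_expected = _get_id_raisefall_from_analogttl(ttl_01)
# id_up -> [2, 8] (grey to bright), id_dw -> [5] (grey to dark), n_ttl_expected -> 6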

287 

288def _reshape_RF(RF_file, meta_stim): 

289 """ 

290 Reshape the Receptive Field (RF) matrix. Take the data associated with the corner 

291 where the frame2ttl sensor is placed to create the TTL trace. 

292 :param RF_file: file containing the flattened RF stimulus frames to be reshaped 

293 :param meta_stim: variable containing metadata information on RF 

294 :return: frames (reshaped RF), analog trace (0-1 values) 

295 """ 

296 frame_array = np.fromfile(RF_file, dtype="uint8") 1abcd

297 y_pix, x_pix, _ = meta_stim["stim_file_shape"] 1abcd

298 frames = np.transpose(np.reshape(frame_array, [y_pix, x_pix, -1], order="F"), [2, 1, 0]) 1abcd

299 ttl_trace = frames[:, 0, 0] 1abcd

300 # Convert values to 0,1,-1 for simplicity 

301 ttl_analogtrace_01 = np.zeros(np.size(ttl_trace)) 1abcd

302 ttl_analogtrace_01[np.where(ttl_trace == 0)] = -1 1abcd

303 ttl_analogtrace_01[np.where(ttl_trace == 255)] = 1 1abcd

304 return frames, ttl_analogtrace_01 1abcd

305 
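A synthetic round-trip of the reshape logic above; the 15 x 15 geometry is an assumption, while the Fortran ordering and corner-pixel extraction mirror the function.

import numpy as np

y_pix, x_pix, n_frames = 15, 15, 4                             # assumed RF map geometry
stim = (np.random.rand(y_pix, x_pix, n_frames) > 0.5).astype("uint8") * 255
flat = stim.flatten(order="F")                                 # what the .raw.bin file stores
frames = np.transpose(np.reshape(flat, [y_pix, x_pix, -1], order="F"), [2, 1, 0])
ttl_trace = frames[:, 0, 0]                                    # pixel under the frame2ttl sensor
assert frames.shape == (n_frames, x_pix, y_pix)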

306 

307# 3/3 Replay of task stimuli 

308def _extract_passiveGabor_df(fttl: dict, session_path: str, task_collection: str = 'raw_passive_data') -> pd.DataFrame: 

309 # At this stage we want to define what pulses are and not quality control them. 

310 # Pulses are strictly alternating with intervals 

311 # find min max lengths for both (we don't know which are pulses and which are intervals yet) 

312 # trim edges of pulses 

313 diff0 = (np.min(np.diff(fttl["times"])[2:-2:2]), np.max(np.diff(fttl["times"])[2:-1:2])) 1abcd

314 diff1 = (np.min(np.diff(fttl["times"])[3:-2:2]), np.max(np.diff(fttl["times"])[3:-1:2])) 1abcd

315 # Highest max is of the intervals 

316 if max(diff0 + diff1) in diff0: 1abcd

317 thresh = diff0[0] 

318 elif max(diff0 + diff1) in diff1: 1abcd

319 thresh = diff1[0] 1abcd

320 # Anything lower than the min length of intervals is a pulse 

321 idx_start_stims = np.where((np.diff(fttl["times"]) < thresh) & (np.diff(fttl["times"]) > 0.1))[0] 1abcd

322 # Check if any pulse has been missed 

323 # i.e. expected length (without first pulse) and that it's alternating 

324 if len(idx_start_stims) < NGABOR - 1 and np.any(np.diff(idx_start_stims) > 2): 1abcd

325 log.warning("Looks like one or more pulses were not detected, trying to extrapolate...") 

326 missing_where = np.where(np.diff(idx_start_stims) > 2)[0] 

327 insert_where = missing_where + 1 

328 missing_value = idx_start_stims[missing_where] + 2 

329 idx_start_stims = np.insert(idx_start_stims, insert_where, missing_value) 

330 

331 idx_end_stims = idx_start_stims + 1 1abcd

332 

333 start_times = fttl["times"][idx_start_stims] 1abcd

334 end_times = fttl["times"][idx_end_stims] 1abcd

335 # Check if we missed the first stim 

336 if len(start_times) < NGABOR: 1abcd

337 first_stim_off_idx = idx_start_stims[0] - 1 1abcd

338 # first_stim_on_idx = first_stim_off_idx - 1 

339 end_times = np.insert(end_times, 0, fttl["times"][first_stim_off_idx]) 1abcd

340 start_times = np.insert(start_times, 0, end_times[0] - 0.3) 1abcd

341 

342 # intervals dstype requires reshaping of start and end times 

343 passiveGabor_intervals = np.array([(x, y) for x, y in zip(start_times, end_times)]) 1abcd

344 

345 # Check length of presentation of stim is within 150ms of expected 

346 if not np.allclose([y - x for x, y in passiveGabor_intervals], 0.3, atol=0.15): 1abcd

347 log.warning("Some Gabor presentation lengths seem wrong.") 

348 

349 assert ( 

350 len(passiveGabor_intervals) == NGABOR 

351 ), f"Wrong number of Gabor stimuli detected: {len(passiveGabor_intervals)} / {NGABOR}" 

352 fixture = _load_passive_session_fixtures(session_path, task_collection) 1abcd

353 passiveGabor_properties = fixture["pcs"] 1abcd

354 passiveGabor_table = np.append(passiveGabor_intervals, passiveGabor_properties, axis=1) 1abcd

355 columns = ["start", "stop", "position", "contrast", "phase"] 1abcd

356 passiveGabor_df = pd.DataFrame(passiveGabor_table, columns=columns) 1abcd

357 return passiveGabor_df 1abcd

358 
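A sketch of the table this function returns, built from made-up onset times and a zero-filled fixture:

import numpy as np
import pandas as pd

NGABOR = 180
starts = 1000.0 + np.arange(NGABOR) * 2.0            # made-up onsets, one every 2 s
intervals = np.c_[starts, starts + 0.3]              # each Gabor lasts ~300 ms
pcs = np.zeros((NGABOR, 3))                          # position, contrast, phase (from the fixture)
gabor_df = pd.DataFrame(np.append(intervals, pcs, axis=1),
                        columns=["start", "stop", "position", "contrast", "phase"])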

359 

360def _extract_passiveValve_intervals(bpod: dict) -> np.array: 

361 # passiveValve.intervals 

362 # Get valve intervals from bpod channel 

363 # bpod channel should only contain valve output for passiveCW protocol 

364 # All high fronts == valve open times and low fronts == valve close times 

365 valveOn_times = bpod["times"][bpod["polarities"] > 0] 1abcd

366 valveOff_times = bpod["times"][bpod["polarities"] < 0] 1abcd

367 

368 assert len(valveOn_times) == NVALVE, "Wrong number of valve ONSET times" 1abcd

369 assert len(valveOff_times) == NVALVE, "Wrong number of valve OFFSET times" 1abcd

370 assert len(bpod["times"]) == NVALVE * 2, "Wrong number of valve FRONTS detected" # (40 * 2) 1abcd

371 

372 # check all values are within bpod tolerance of 100µs 

373 assert np.allclose( 1abcd

374 valveOff_times - valveOn_times, valveOff_times[1] - valveOn_times[1], atol=0.0001 

375 ), "Some valve outputs are longer or shorter than others" 

376 

377 return np.array([(x, y) for x, y in zip(valveOn_times, valveOff_times)]) 1abcd

378 
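A toy bpod sync dictionary that satisfies the assertions above: NVALVE openings of exactly 100 ms each, with made-up opening times.

import numpy as np

NVALVE = 40
valve_on = np.arange(NVALVE) * 1.0                     # made-up opening times, 1 s apart
valve_off = valve_on + 0.1                             # every opening lasts 100 ms
bpod = {"times": np.sort(np.r_[valve_on, valve_off]),
        "polarities": np.tile([1, -1], NVALVE)}        # rising then falling front per opening
# _extract_passiveValve_intervals(bpod) -> (40, 2) array of [open, close] times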

379 

380def _extract_passiveAudio_intervals(audio: dict, rig_version: str) -> Tuple[np.array, np.array]: 

381 

382 # make an exception for task version = 6.2.5 where things are strange but data is recoverable 

383 if rig_version == '6.2.5': 1abcd

384 # Get all sound onsets and offsets 

385 soundOn_times = audio["times"][audio["polarities"] > 0] 

386 soundOff_times = audio["times"][audio["polarities"] < 0] 

387 

388 # Have a couple that are wayyy too long! 

389 time_threshold = 10 

390 diff = soundOff_times - soundOn_times 

391 stupid = np.where(diff > time_threshold)[0] 

392 NREMOVE = len(stupid) 

393 not_stupid = np.where(diff < time_threshold)[0] 

394 

395 assert len(soundOn_times) == NTONES + NNOISES - NREMOVE, "Wrong number of sound ONSETS" 

396 assert len(soundOff_times) == NTONES + NNOISES - NREMOVE, "Wrong number of sound OFFSETS" 

397 

398 soundOn_times = soundOn_times[not_stupid] 

399 soundOff_times = soundOff_times[not_stupid] 

400 

401 diff = soundOff_times - soundOn_times 

402 # Tone is ~100ms so check if diff < 0.3 

403 toneOn_times = soundOn_times[diff <= 0.3] 

404 toneOff_times = soundOff_times[diff <= 0.3] 

405 # Noise is ~500ms so check if diff > 0.3 

406 noiseOn_times = soundOn_times[diff > 0.3] 

407 noiseOff_times = soundOff_times[diff > 0.3] 

408 

409 # append with nans 

410 toneOn_times = np.r_[toneOn_times, np.full((NTONES - len(toneOn_times)), np.NAN)] 

411 toneOff_times = np.r_[toneOff_times, np.full((NTONES - len(toneOff_times)), np.NAN)] 

412 noiseOn_times = np.r_[noiseOn_times, np.full((NNOISES - len(noiseOn_times)), np.NAN)] 

413 noiseOff_times = np.r_[noiseOff_times, np.full((NNOISES - len(noiseOff_times)), np.NAN)] 

414 

415 else: 

416 # Get all sound onsets and offsets 

417 soundOn_times = audio["times"][audio["polarities"] > 0] 1abcd

418 soundOff_times = audio["times"][audio["polarities"] < 0] 1abcd

419 

420 # Check they are the correct number 

421 assert len(soundOn_times) == NTONES + NNOISES, "Wrong number of sound ONSETS" 1abcd

422 assert len(soundOff_times) == NTONES + NNOISES, "Wrong number of sound OFFSETS" 1abcd

423 

424 diff = soundOff_times - soundOn_times 1abcd

425 # Tone is ~100ms so check if diff < 0.3 

426 toneOn_times = soundOn_times[diff <= 0.3] 1abcd

427 toneOff_times = soundOff_times[diff <= 0.3] 1abcd

428 # Noise is ~500ms so check if diff > 0.3 

429 noiseOn_times = soundOn_times[diff > 0.3] 1abcd

430 noiseOff_times = soundOff_times[diff > 0.3] 1abcd

431 

432 assert len(toneOn_times) == NTONES 1abcd

433 assert len(toneOff_times) == NTONES 1abcd

434 assert len(noiseOn_times) == NNOISES 1abcd

435 assert len(noiseOff_times) == NNOISES 1abcd

436 

437 # Fixed delays from soundcard ~500µs 

438 np.allclose(toneOff_times - toneOn_times, 0.1, atol=0.0006) 1abcd

439 np.allclose(noiseOff_times - noiseOn_times, 0.5, atol=0.0006) 1abcd

440 

441 passiveTone_intervals = np.append( 1abcd

442 toneOn_times.reshape((len(toneOn_times), 1)), 

443 toneOff_times.reshape((len(toneOff_times), 1)), 

444 axis=1, 

445 ) 

446 passiveNoise_intervals = np.append( 1abcd

447 noiseOn_times.reshape((len(noiseOn_times), 1)), 

448 noiseOff_times.reshape((len(noiseOff_times), 1)), 

449 axis=1, 

450 ) 

451 return passiveTone_intervals, passiveNoise_intervals 1abcd

452 
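A toy audio sync dictionary for the standard (non-6.2.5) branch above: 40 tones of ~100 ms and 40 noise bursts of ~500 ms, at made-up times.

import numpy as np

NTONES, NNOISES = 40, 40
onsets = np.arange(NTONES + NNOISES) * 2.0                       # made-up, one sound every 2 s
durations = np.r_[np.full(NTONES, 0.1), np.full(NNOISES, 0.5)]   # tones ~100 ms, noises ~500 ms
audio = {"times": np.sort(np.r_[onsets, onsets + durations]),
         "polarities": np.tile([1, -1], NTONES + NNOISES)}
# _extract_passiveAudio_intervals(audio, rig_version="7.0.0")    # any version other than '6.2.5'
# -> a (40, 2) array of tone intervals and a (40, 2) array of noise intervals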

453 

454# ------------------------------------------------------------------ 

455def extract_passive_periods(session_path: str, sync_collection: str = 'raw_ephys_data', sync: dict = None, 

456 sync_map: dict = None, tmin=None, tmax=None) -> pd.DataFrame: 

457 

458 if sync is None or sync_map is None: 1abcdf

459 sync, sync_map = ephys_fpga.get_sync_and_chn_map(session_path, sync_collection) 

460 

461 t_start_passive, t_starts, t_ends = _get_passive_spacers( 1abcdf

462 session_path, sync_collection, sync=sync, sync_map=sync_map, tmin=tmin, tmax=tmax 

463 ) 

464 t_starts_col = np.insert(t_starts, 0, t_start_passive) 1abcd

465 t_ends_col = np.insert(t_ends, 0, t_ends[-1]) 1abcd

466 # tpassive_protocol = [t_start_passive, t_ends[-1]] 

467 # tspontaneous = [t_starts[0], t_ends[0]] 

468 # trfm = [t_starts[1], t_ends[1]] 

469 # treplay = [t_starts[2], t_ends[2]] 

470 passivePeriods_df = pd.DataFrame( 1abcd

471 [t_starts_col, t_ends_col], 

472 index=["start", "stop"], 

473 columns=["passiveProtocol", "spontaneousActivity", "RFM", "taskReplay"], 

474 ) 

475 return passivePeriods_df # _ibl_passivePeriods.intervalsTable.csv 1abcd

476 
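A sketch of the intervals table returned above, with made-up times in seconds:

import pandas as pd

passivePeriods_sketch = pd.DataFrame(
    [[1000.0, 1012.0, 1320.0, 1610.0],   # start of each period
     [2400.0, 1310.0, 1600.0, 2400.0]],  # stop of each period
    index=["start", "stop"],
    columns=["passiveProtocol", "spontaneousActivity", "RFM", "taskReplay"],
)
# passivePeriods_sketch.RFM.values -> array([1320., 1600.]), the window consumed by extract_rfmapping()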

477 

478def extract_rfmapping( 

479 session_path: str, sync_collection: str = 'raw_ephys_data', task_collection: str = 'raw_passive_data', 

480 sync: dict = None, sync_map: dict = None, trfm: np.array = None 

481) -> Tuple[np.array, np.array]: 

482 meta = _load_passive_stim_meta() 1abcd

483 mkey = ( 1abcd

484 "VISUAL_STIM_" 

485 + {v: k for k, v in meta["VISUAL_STIMULI"].items()}["receptive_field_mapping"] 

486 ) 

487 if sync is None or sync_map is None: 1abcd

488 sync, sync_map = ephys_fpga.get_sync_and_chn_map(session_path, sync_collection) 

489 if trfm is None: 1abcd

490 passivePeriods_df = extract_passive_periods(session_path, sync_collection, sync=sync, sync_map=sync_map) 

491 trfm = passivePeriods_df.RFM.values 

492 

493 fttl = ephys_fpga.get_sync_fronts(sync, sync_map["frame2ttl"], tmin=trfm[0], tmax=trfm[1]) 1abcd

494 fttl = ephys_fpga._clean_frame2ttl(fttl) 1abcd

495 RF_file = Path().joinpath(session_path, task_collection, "_iblrig_RFMapStim.raw.bin") 1abcd

496 passiveRFM_frames, RF_ttl_trace = _reshape_RF(RF_file=RF_file, meta_stim=meta[mkey]) 1abcd

497 rf_id_up, rf_id_dw, RF_n_ttl_expected = _get_id_raisefall_from_analogttl(RF_ttl_trace) 1abcd

498 meta[mkey]["ttl_num"] = RF_n_ttl_expected 1abcd

499 rf_times_on_idx = np.where(np.diff(fttl["times"]) < 1)[0] 1abcd

500 rf_times_off_idx = rf_times_on_idx + 1 1abcd

501 RF_times = fttl["times"][np.sort(np.concatenate([rf_times_on_idx, rf_times_off_idx]))] 1abcd

502 RF_times_1 = RF_times[0::2] 1abcd

503 # Interpolate times for RF before outputting dataset 

504 passiveRFM_times = _interpolate_rf_mapping_stimulus( 1abcd

505 idxs_up=rf_id_up, 

506 idxs_dn=rf_id_dw, 

507 times=RF_times_1, 

508 Xq=np.arange(passiveRFM_frames.shape[0]), 

509 t_bin=1 / FRAME_FS, 

510 ) 

511 

512 return passiveRFM_times # _ibl_passiveRFM.times.npy 1abcd

513 

514 

515def extract_task_replay( 

516 session_path: str, sync_collection: str = 'raw_ephys_data', task_collection: str = 'raw_passive_data', 

517 sync: dict = None, sync_map: dict = None, treplay: np.array = None 

518) -> Tuple[pd.DataFrame, pd.DataFrame]: 

519 

520 if sync is None or sync_map is None: 1abcd

521 sync, sync_map = ephys_fpga.get_sync_and_chn_map(session_path, sync_collection) 

522 

523 if treplay is None: 1abcd

524 passivePeriods_df = extract_passive_periods(session_path, sync_collection, sync=sync, sync_map=sync_map) 

525 treplay = passivePeriods_df.taskReplay.values 

526 

527 # TODO need to check this is okay 

528 fttl = ephys_fpga.get_sync_fronts(sync, sync_map["frame2ttl"], tmin=treplay[0], tmax=treplay[1]) 1abcd

529 fttl = ephys_fpga._clean_frame2ttl(fttl) 1abcd

530 passiveGabor_df = _extract_passiveGabor_df(fttl, session_path, task_collection=task_collection) 1abcd

531 

532 bpod = ephys_fpga.get_sync_fronts(sync, sync_map["bpod"], tmin=treplay[0], tmax=treplay[1]) 1abcd

533 passiveValve_intervals = _extract_passiveValve_intervals(bpod) 1abcd

534 

535 task_version = _load_task_protocol(session_path, task_collection) 1abcd

536 audio = ephys_fpga.get_sync_fronts(sync, sync_map["audio"], tmin=treplay[0], tmax=treplay[1]) 1abcd

537 passiveTone_intervals, passiveNoise_intervals = _extract_passiveAudio_intervals(audio, task_version) 1abcd

538 

539 passiveStims_df = np.concatenate( 1abcd

540 [passiveValve_intervals, passiveTone_intervals, passiveNoise_intervals], axis=1 

541 ) 

542 columns = ["valveOn", "valveOff", "toneOn", "toneOff", "noiseOn", "noiseOff"] 1abcd

543 passiveStims_df = pd.DataFrame(passiveStims_df, columns=columns) 1abcd

544 return ( 1abcd

545 passiveGabor_df, 

546 passiveStims_df, 

547 ) # _ibl_passiveGabor.table.csv, _ibl_passiveStims.table.csv 

548 

549 

550def extract_replay_debug( 

551 session_path: str, 

552 sync_collection: str = 'raw_ephys_data', 

553 task_collection: str = 'raw_passive_data', 

554 sync: dict = None, 

555 sync_map: dict = None, 

556 treplay: np.array = None, 

557 ax: plt.axes = None, 

558) -> Tuple[pd.DataFrame, pd.DataFrame]: 

559 # Load the session's sync channels and channel map 

560 if sync is None or sync_map is None: 

561 sync, sync_map = ephys_fpga.get_sync_and_chn_map(session_path, sync_collection) 

562 

563 if treplay is None: 

564 passivePeriods_df = extract_passive_periods(session_path, sync_collection=sync_collection, sync=sync, sync_map=sync_map) 

565 treplay = passivePeriods_df.taskReplay.values 

566 

567 if ax is None: 

568 f, ax = plt.subplots(1, 1) 

569 

570 f = ax.figure 

571 f.suptitle("/".join(str(session_path).split("/")[-5:])) 

572 plot_sync_channels(sync=sync, sync_map=sync_map, ax=ax) 

573 

574 passivePeriods_df = extract_passive_periods(session_path, sync_collection=sync_collection, sync=sync, sync_map=sync_map) 

575 treplay = passivePeriods_df.taskReplay.values 

576 

577 plot_passive_periods(passivePeriods_df, ax=ax) 

578 

579 fttl = ephys_fpga.get_sync_fronts(sync, sync_map["frame2ttl"], tmin=treplay[0]) 

580 passiveGabor_df = _extract_passiveGabor_df(fttl, session_path, task_collection=task_collection) 

581 plot_gabor_times(passiveGabor_df, ax=ax) 

582 

583 bpod = ephys_fpga.get_sync_fronts(sync, sync_map["bpod"], tmin=treplay[0]) 

584 passiveValve_intervals = _extract_passiveValve_intervals(bpod) 

585 plot_valve_times(passiveValve_intervals, ax=ax) 

586 

587 task_version = _load_task_protocol(session_path, task_collection) 

588 audio = ephys_fpga.get_sync_fronts(sync, sync_map["audio"], tmin=treplay[0]) 

589 passiveTone_intervals, passiveNoise_intervals = _extract_passiveAudio_intervals(audio, task_version) 

590 plot_audio_times(passiveTone_intervals, passiveNoise_intervals, ax=ax) 

591 

592 passiveStims_df = np.concatenate( 

593 [passiveValve_intervals, passiveTone_intervals, passiveNoise_intervals], axis=1 

594 ) 

595 columns = ["valveOn", "valveOff", "toneOn", "toneOff", "noiseOn", "noiseOff"] 

596 passiveStims_df = pd.DataFrame(passiveStims_df, columns=columns) 

597 

598 return ( 

599 passiveGabor_df, 

600 passiveStims_df, 

601 ) # _ibl_passiveGabor.table.csv, _ibl_passiveStims.table.csv 

602 

603 

604# Main passiveCW extractor, calls all others 

605class PassiveChoiceWorld(BaseExtractor): 

606 save_names = ( 

607 "_ibl_passivePeriods.intervalsTable.csv", 

608 "_ibl_passiveRFM.times.npy", 

609 "_ibl_passiveGabor.table.csv", 

610 "_ibl_passiveStims.table.csv", 

611 ) 

612 var_names = ( 

613 "passivePeriods_df", 

614 "passiveRFM_times", 

615 "passiveGabor_df", 

616 "passiveStims_df", 

617 ) 

618 

619 def _extract(self, sync_collection: str = 'raw_ephys_data', task_collection: str = 'raw_passive_data', sync: dict = None, 

620 sync_map: dict = None, plot: bool = False, **kwargs) -> tuple: 

621 if sync is None or sync_map is None: 1abcdf

622 sync, sync_map = ephys_fpga.get_sync_and_chn_map(self.session_path, sync_collection) 1abcdf

623 

624 # Get the start and end times of this protocol 

625 if (protocol_number := kwargs.get('protocol_number')) is not None: # look for spacer 1abcdf

626 # The spacers are TTLs generated by Bpod at the start of each protocol 

627 bpod = ephys_fpga.get_sync_fronts(sync, sync_map['bpod']) 1b

628 tmin, tmax = ephys_fpga.get_protocol_period(self.session_path, protocol_number, bpod) 1b

629 else: 

630 tmin = tmax = None 1acdf

631 

632 # Passive periods 

633 passivePeriods_df = extract_passive_periods(self.session_path, sync_collection=sync_collection, sync=sync, 1abcdf

634 sync_map=sync_map, tmin=tmin, tmax=tmax) 

635 trfm = passivePeriods_df.RFM.values 1abcd

636 treplay = passivePeriods_df.taskReplay.values 1abcd

637 

638 try: 1abcd

639 # RFMapping 

640 passiveRFM_times = extract_rfmapping(self.session_path, sync_collection=sync_collection, 1abcd

641 task_collection=task_collection, sync=sync, sync_map=sync_map, trfm=trfm) 

642 except Exception as e: 

643 log.error(f"Failed to extract RFMapping datasets: {e}") 

644 passiveRFM_times = None 

645 

646 try: 1abcd

647 (passiveGabor_df, passiveStims_df,) = extract_task_replay( 1abcd

648 self.session_path, sync_collection=sync_collection, task_collection=task_collection, sync=sync, 

649 sync_map=sync_map, treplay=treplay) 

650 except Exception as e: 

651 log.error(f"Failed to extract task replay stimuli: {e}") 

652 passiveGabor_df, passiveStims_df = (None, None) 

653 

654 if plot: 1abcd

655 f, ax = plt.subplots(1, 1) 

656 f.suptitle("/".join(str(self.session_path).split("/")[-5:])) 

657 plot_sync_channels(sync=sync, sync_map=sync_map, ax=ax) 

658 plot_passive_periods(passivePeriods_df, ax=ax) 

659 plot_rfmapping(passiveRFM_times, ax=ax) 

660 plot_gabor_times(passiveGabor_df, ax=ax) 

661 plot_stims_times(passiveStims_df, ax=ax) 

662 plt.show() 

663 

664 data = ( 1abcd

665 passivePeriods_df, # _ibl_passivePeriods.intervalsTable.csv 

666 passiveRFM_times, # _ibl_passiveRFM.times.npy 

667 passiveGabor_df, # _ibl_passiveGabor.table.csv, 

668 passiveStims_df # _ibl_passiveStims.table.csv 

669 ) 

670 

671 # Set save names to None if data not extracted - these will not be saved or registered 

672 self.save_names = tuple(None if y is None else x for x, y in zip(self.save_names, data)) 1abcd

673 return data 1abcd
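
A hedged usage sketch of the extractor class; the session path is hypothetical and passing it to the constructor assumes the BaseExtractor signature.

from ibllib.io.extractors.ephys_passive import PassiveChoiceWorld

session_path = "/data/Subjects/some_subject/2023-01-01/001"    # hypothetical session
extractor = PassiveChoiceWorld(session_path)                   # assumes BaseExtractor(session_path)
periods_df, rfm_times, gabor_df, stims_df = extractor._extract(
    sync_collection="raw_ephys_data",
    task_collection="raw_passive_data",
    plot=False,
)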