Coverage for ibllib/io/raw_data_loaders.py: 84%
433 statements
coverage.py v7.3.2, created at 2023-10-11 11:13 +0100

#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Niccolò Bonacchi, Miles Wells
# @Date: Monday, July 16th 2018, 1:28:46 pm
"""
Raw Data Loader functions for PyBpod rig

Module contains one loader function per raw datafile
"""
import json
import logging
import wave
from collections import OrderedDict
from datetime import datetime
from pathlib import Path, PureWindowsPath
from typing import Union

from dateutil import parser as dateparser
from pkg_resources import parse_version
import numpy as np
import pandas as pd

from iblutil.io import jsonable
from ibllib.io.video import assert_valid_label
from ibllib.time import uncycle_pgts, convert_pgts, date2isostr

_logger = logging.getLogger(__name__)


def trial_times_to_times(raw_trial):
    """
    Parse and convert all trial timestamps to "absolute" time.
    Float64 seconds from session start.

    0---BpodStart---TrialStart0---------TrialEnd0-----TrialStart1---TrialEnd1...0---ts0---ts1---
    tsN...absTS = tsN + TrialStartN - BpodStart

    Bpod timestamps are in microseconds (µs)
    PyBpod timestamps are in seconds (s)

    :param raw_trial: raw trial data
    :type raw_trial: dict
    :return: trial data with modified timestamps
    :rtype: dict
    """
    ts_bs = raw_trial['behavior_data']['Bpod start timestamp']
    ts_ts = raw_trial['behavior_data']['Trial start timestamp']
    # ts_te = raw_trial['behavior_data']['Trial end timestamp']

    def convert(ts):
        return ts + ts_ts - ts_bs

    converted_events = {}
    for k, v in raw_trial['behavior_data']['Events timestamps'].items():
        converted_events.update({k: [convert(i) for i in v]})
    raw_trial['behavior_data']['Events timestamps'] = converted_events

    converted_states = {}
    for k, v in raw_trial['behavior_data']['States timestamps'].items():
        converted_states.update({k: [[convert(i) for i in x] for x in v]})
    raw_trial['behavior_data']['States timestamps'] = converted_states

    shift = raw_trial['behavior_data']['Bpod start timestamp']
    raw_trial['behavior_data']['Bpod start timestamp'] -= shift
    raw_trial['behavior_data']['Trial start timestamp'] -= shift
    raw_trial['behavior_data']['Trial end timestamp'] -= shift
    assert raw_trial['behavior_data']['Bpod start timestamp'] == 0
    return raw_trial
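
# Minimal sketch of how the conversion above behaves; the trial dict below is
# hypothetical, not taken from a real session:
#
#     trial = {'behavior_data': {
#         'Bpod start timestamp': 10.0,
#         'Trial start timestamp': 12.5,
#         'Trial end timestamp': 14.0,
#         'Events timestamps': {'Port1In': [0.2]},
#         'States timestamps': {'reward': [[0.1, 0.3]]}}}
#     trial = trial_times_to_times(trial)
#     trial['behavior_data']['Events timestamps']['Port1In']  # [2.7], i.e. 0.2 + 12.5 - 10.0
#     trial['behavior_data']['Trial start timestamp']         # 2.5, shifted so Bpod start == 0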


def load_bpod(session_path, task_collection='raw_behavior_data'):
    """
    Load both settings and data from bpod (.json and .jsonable)

    :param session_path: Absolute path of session folder
    :param task_collection: Collection within session path with behavior data
    :return: dict settings and list of dicts data
    """
    return load_settings(session_path, task_collection), load_data(session_path, task_collection)
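
# Usage sketch (the session path is hypothetical; it must contain a
# 'raw_behavior_data' collection with the raw settings and task data files):
#
#     settings, data = load_bpod('/data/Subjects/ZM_1098/2019-01-25/001')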


def load_data(session_path: Union[str, Path], task_collection='raw_behavior_data', time='absolute'):
    """
    Load PyBpod data files (.jsonable).

    Bpod timestamps are in microseconds (µs)
    PyBpod timestamps are in seconds (s)

    :param session_path: Absolute path of session folder
    :type session_path: str, Path
    :return: A list of len ntrials, each trial being a dictionary
    :rtype: list of dicts
    """
    if session_path is None:
        _logger.warning('No data loaded: session_path is None')
        return
    path = Path(session_path).joinpath(task_collection)
    path = next(path.glob('_iblrig_taskData.raw*.jsonable'), None)
    if not path:
        _logger.warning('No data loaded: could not find raw data file')
        return None
    data = jsonable.read(path)
    if time == 'absolute':
        data = [trial_times_to_times(t) for t in data]
    return data
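
# Usage sketch (hypothetical path); with time='absolute' (the default) every trial's
# timestamps are expressed in seconds from session start via trial_times_to_times:
#
#     trials = load_data('/data/Subjects/ZM_1098/2019-01-25/001')
#     if trials is not None:
#         print(f'{len(trials)} trials loaded')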


def load_camera_frameData(session_path, camera: str = 'left', raw: bool = False) -> pd.DataFrame:
    """Loads binary frame data from Bonsai camera recording workflow.

    Args:
        session_path (StrPath): Path to session folder
        camera (str, optional): Load FrameData for specific camera. Defaults to 'left'.
        raw (bool, optional): Whether to return raw or parsed data. Defaults to False.

    Returns:
        parsed: (raw=False, default)
            pandas.DataFrame: 4 columns: {
                Timestamp,             # float64 (seconds from session start)
                embeddedTimeStamp,     # float64 (seconds from session start)
                embeddedFrameCounter,  # int64 (frame number from session start)
                embeddedGPIOPinState   # object (state of each of the 4 GPIO pins as a
                                       # list of numpy boolean arrays,
                                       # e.g. np.array([True, False, False, False]))
            }
        raw:
            pandas.DataFrame: 4 int64 columns: {
                Timestamp,             # UTC ticks from BehaviorPC
                                       # (100s of ns since midnight 1/1/0001)
                embeddedTimeStamp,     # camera timestamp (needs uncycling and conversion)
                embeddedFrameCounter,  # frame counter (int)
                embeddedGPIOPinState   # GPIO pin state, integer representation of 4 pins
            }
    """
    camera = assert_valid_label(camera)
    fpath = Path(session_path).joinpath("raw_video_data")
    fpath = next(fpath.glob(f"_iblrig_{camera}Camera.frameData*.bin"), None)
    assert fpath, f"{fpath}\nFile not Found: Could not find bin file for cam <{camera}>"
    rdata = np.fromfile(fpath, dtype=np.float64)
    assert rdata.size % 4 == 0, "Dimension mismatch: bin file length is not mod 4"
    rows = int(rdata.size / 4)
    data = np.reshape(rdata.astype(np.int64), (rows, 4))
    df_dict = dict.fromkeys(
        ["Timestamp", "embeddedTimeStamp", "embeddedFrameCounter", "embeddedGPIOPinState"]
    )
    df = pd.DataFrame(data, columns=df_dict.keys())
    if raw:
        return df

    df_dict["Timestamp"] = (data[:, 0] - data[0, 0]) / 10_000_000  # in seconds from first frame
    camerats = uncycle_pgts(convert_pgts(data[:, 1]))
    df_dict["embeddedTimeStamp"] = camerats - camerats[0]  # in seconds from first frame
    df_dict["embeddedFrameCounter"] = data[:, 2] - data[0, 2]  # from start
    gpio = (np.right_shift(np.tile(data[:, 3], (4, 1)).T, np.arange(31, 27, -1)) & 0x1) == 1
    df_dict["embeddedGPIOPinState"] = [np.array(x) for x in gpio.tolist()]

    parsed_df = pd.DataFrame.from_dict(df_dict)
    return parsed_df
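
# Usage sketch (hypothetical path; assumes an _iblrig_leftCamera.frameData*.bin file exists):
#
#     df = load_camera_frameData('/data/Subjects/ZM_1098/2019-01-25/001', camera='left')
#     df['embeddedGPIOPinState'].iloc[0]  # e.g. np.array([True, False, False, False])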


def load_camera_ssv_times(session_path, camera: str):
    """
    Load the bonsai frame and camera timestamps from Camera.timestamps.ssv

    NB: For some sessions the frame times are in the first column, in others the order is reversed.
    NB: If using the new bin file the bonsai_times is a float in seconds since first frame
    :param session_path: Absolute path of session folder
    :param camera: Name of the camera to load, e.g. 'left'
    :return: array of datetimes, array of frame times in seconds
    """
    camera = assert_valid_label(camera)
    video_path = Path(session_path).joinpath('raw_video_data')
    if next(video_path.glob(f'_iblrig_{camera}Camera.frameData*.bin'), None):
        df = load_camera_frameData(session_path, camera=camera)
        return df['Timestamp'].values, df['embeddedTimeStamp'].values

    file = next(video_path.glob(f'_iblrig_{camera.lower()}Camera.timestamps*.ssv'), None)
    if not file:
        file = str(video_path.joinpath(f'_iblrig_{camera.lower()}Camera.timestamps.ssv'))
        raise FileNotFoundError(file + ' not found')
    # NB: Numpy has deprecated support for non-naive timestamps.
    # Converting them is extremely slow: 6000 timestamps takes 0.8615s vs 0.0352s.
    # from datetime import timezone
    # c = {0: lambda x: datetime.fromisoformat(x).astimezone(timezone.utc).replace(tzinfo=None)}

    # Determine the order of the columns by reading one line and testing whether the first value
    # is an integer or not.
    with open(file, 'r') as f:
        line = f.readline()
    type_map = OrderedDict(bonsai='<M8[ns]', camera='<u4')
    try:
        int(line.split(' ')[1])
    except ValueError:
        type_map.move_to_end('bonsai')
    ssv_params = dict(names=type_map.keys(), dtype=','.join(type_map.values()), delimiter=' ')
    ssv_times = np.genfromtxt(file, **ssv_params)  # np.loadtxt is slower for some reason
    bonsai_times = ssv_times['bonsai']
    camera_times = uncycle_pgts(convert_pgts(ssv_times['camera']))
    return bonsai_times, camera_times
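
# Usage sketch (hypothetical path):
#
#     bonsai_times, camera_times = load_camera_ssv_times('/data/Subjects/ZM_1098/2019-01-25/001',
#                                                        camera='left')
#     # bonsai_times: datetimes (or seconds if loaded from the bin file); camera_times: seconds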


def load_embedded_frame_data(session_path, label: str, raw=False):
    """
    Load the embedded frame count and GPIO for a given session. If the file doesn't exist,
    or is empty, None values are returned.

    :param session_path: Absolute path of session folder
    :param label: The specific video to load, one of ('left', 'right', 'body')
    :param raw: If True the raw data are returned without preprocessing, otherwise frame count is
    returned starting from 0 and the GPIO is returned as a dict of indices
    :return: The frame count, GPIO
    """
    count = load_camera_frame_count(session_path, label, raw=raw)
    gpio = load_camera_gpio(session_path, label, as_dicts=not raw)
    return count, gpio
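
# Usage sketch (hypothetical path):
#
#     count, gpio = load_embedded_frame_data('/data/Subjects/ZM_1098/2019-01-25/001', 'left')
#     # count starts at 0; gpio is a list of 4 dicts with 'indices' and 'polarities' (or None)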


def load_camera_frame_count(session_path, label: str, raw=True):
    """
    Load the embedded frame count for a given session. If the file doesn't exist, or is empty,
    a None value is returned.

    :param session_path: Absolute path of session folder
    :param label: The specific video to load, one of ('left', 'right', 'body')
    :param raw: If True the raw data are returned without preprocessing, otherwise frame count is
    returned starting from 0
    :return: The frame count
    """
    if session_path is None:
        return

    label = assert_valid_label(label)
    video_path = Path(session_path).joinpath('raw_video_data')
    if next(video_path.glob(f'_iblrig_{label}Camera.frameData*.bin'), None):
        df = load_camera_frameData(session_path, camera=label)
        return df['embeddedFrameCounter'].values

    # Load frame count
    glob = video_path.glob(f'_iblrig_{label}Camera.frame_counter*.bin')
    count_file = next(glob, None)
    count = np.fromfile(count_file, dtype=np.float64).astype(int) if count_file else []
    if len(count) == 0:
        return
    if not raw:
        count -= count[0]  # start from zero
    return count


def load_camera_gpio(session_path, label: str, as_dicts=False):
    """
    Load the GPIO for a given session. If the file doesn't exist, or is empty, a None value is
    returned.

    The raw binary file contains uint32 values (saved as doubles) where the first 4 bits
    represent the state of each of the 4 GPIO pins. The array is expanded to an n x 4 array by
    shifting each bit to the end and checking whether it is 0 (low state) or 1 (high state).

    :param session_path: Absolute path of session folder
    :param label: The specific video to load, one of ('left', 'right', 'body')
    :param as_dicts: If False the raw data are returned as a boolean array with shape
    (n_frames, n_pins), otherwise GPIO is returned as a list of dictionaries with keys
    ('indices', 'polarities').
    :return: An n x 4 boolean array where columns represent the state of GPIO pins 1-4.
    If as_dicts is True, a list of dicts is returned with keys ('indices', 'polarities'),
    or None if the dictionary is empty.
    """
    if session_path is None:
        return
    raw_path = Path(session_path).joinpath('raw_video_data')
    label = assert_valid_label(label)

    # Load pin state
    if next(raw_path.glob(f'_iblrig_{label}Camera.frameData*.bin'), False):
        df = load_camera_frameData(session_path, camera=label, raw=False)
        gpio = np.array([x for x in df['embeddedGPIOPinState'].values])
        if len(gpio) == 0:
            return [None] * 4 if as_dicts else None
    else:
        GPIO_file = next(raw_path.glob(f'_iblrig_{label}Camera.GPIO*.bin'), None)
        # This deals with missing and empty files the same
        gpio = np.fromfile(GPIO_file, dtype=np.float64).astype(np.uint32) if GPIO_file else []
        # Check values make sense (4 pins = 16 possible values)
        if not np.isin(gpio, np.left_shift(np.arange(2 ** 4, dtype=np.uint32), 32 - 4)).all():
            _logger.warning('Unexpected GPIO values; decoding may fail')
        if len(gpio) == 0:
            return [None] * 4 if as_dicts else None
        # 4 pins represented as uint32
        # For each pin, shift its bit to the end and check the bit is set
        gpio = (np.right_shift(np.tile(gpio, (4, 1)).T, np.arange(31, 27, -1)) & 0x1) == 1

    if as_dicts:
        if not gpio.any():
            _logger.error('No GPIO changes')
            return [None] * 4
        # Find state changes for each pin and construct a dict of indices and polarities for each
        edges = np.vstack((gpio[0, :], np.diff(gpio.astype(int), axis=0)))
        # gpio = [(ind := np.where(edges[:, i])[0], edges[ind, i]) for i in range(4)]
        # gpio = [dict(zip(('indices', 'polarities'), x)) for x in gpio_]  # py3.8
        gpio = [{'indices': np.where(edges[:, i])[0],
                 'polarities': edges[edges[:, i] != 0, i]}
                for i in range(4)]
        # Replace empty dicts with None
        gpio = [None if x['indices'].size == 0 else x for x in gpio]

    return gpio
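
# Minimal sketch of the bit decoding used above: each uint32 sample stores the 4 pin
# states in its top 4 bits, so right-shifting by 31..28 and masking with 0x1 recovers
# each pin. The sample value below is made up for illustration:
#
#     sample = np.uint32(0b1010 << 28)  # pins 1 and 3 high
#     pins = (np.right_shift(np.tile(sample, (4, 1)).T, np.arange(31, 27, -1)) & 0x1) == 1
#     # pins -> array([[ True, False,  True, False]])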


def _read_settings_json_compatibility_enforced(settings):
    """
    Patch iblrig settings for compatibility across rig versions.

    Parameters
    ----------
    settings : pathlib.Path, dict
        Either a _iblrig_taskSettings.raw.json file path or the loaded settings.

    Returns
    -------
    dict
        The task settings patched for compatibility.
    """
    if isinstance(settings, dict):
        md = settings.copy()
    else:
        with open(settings) as js:
            md = json.load(js)
    if 'IS_MOCK' not in md:
        md['IS_MOCK'] = False
    if 'IBLRIG_VERSION_TAG' not in md.keys():
        md['IBLRIG_VERSION_TAG'] = md.get('IBLRIG_VERSION', '')
    # 2018-12-05 Version 3.2.3 fixes (permanent fixes in IBL_RIG from 3.2.4 on)
    if md['IBLRIG_VERSION_TAG'] == '':
        pass
    elif parse_version(md.get('IBLRIG_VERSION_TAG')) >= parse_version('8.0.0'):
        md['SESSION_NUMBER'] = str(md['SESSION_NUMBER']).zfill(3)
        md['PYBPOD_BOARD'] = md['RIG_NAME']
        md['PYBPOD_CREATOR'] = (md['ALYX_USER'], '')
        md['SESSION_DATE'] = md['SESSION_START_TIME'][:10]
        md['SESSION_DATETIME'] = md['SESSION_START_TIME']
    elif parse_version(md.get('IBLRIG_VERSION_TAG')) <= parse_version('3.2.3'):
        if 'LAST_TRIAL_DATA' in md.keys():
            md.pop('LAST_TRIAL_DATA')
        if 'weighings' in md['PYBPOD_SUBJECT_EXTRA'].keys():
            md['PYBPOD_SUBJECT_EXTRA'].pop('weighings')
        if 'water_administration' in md['PYBPOD_SUBJECT_EXTRA'].keys():
            md['PYBPOD_SUBJECT_EXTRA'].pop('water_administration')
        if 'IBLRIG_COMMIT_HASH' not in md.keys():
            md['IBLRIG_COMMIT_HASH'] = 'f9d8905647dbafe1f9bdf78f73b286197ae2647b'
        # parse the date format to Django supported ISO
        dt = dateparser.parse(md['SESSION_DATETIME'])
        md['SESSION_DATETIME'] = date2isostr(dt)
        # add the weight key if it doesn't already exist
        if 'SUBJECT_WEIGHT' not in md:
            md['SUBJECT_WEIGHT'] = None
    return md


def load_settings(session_path: Union[str, Path], task_collection='raw_behavior_data'):
    """
    Load PyBpod Settings files (.json).

    :param session_path: Absolute path of session folder
    :type session_path: str, Path
    :return: Settings dictionary
    :rtype: dict
    """
    if session_path is None:
        _logger.warning("No data loaded: session_path is None")
        return
    path = Path(session_path).joinpath(task_collection)
    path = next(path.glob("_iblrig_taskSettings.raw*.json"), None)
    if not path:
        _logger.warning("No data loaded: could not find raw settings file")
        return None
    settings = _read_settings_json_compatibility_enforced(path)
    return settings
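
# Usage sketch (hypothetical path); the returned dict is patched for compatibility
# across iblrig versions by _read_settings_json_compatibility_enforced:
#
#     settings = load_settings('/data/Subjects/ZM_1098/2019-01-25/001')
#     version = settings['IBLRIG_VERSION_TAG'] if settings else None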


def load_stim_position_screen(session_path, task_collection='raw_behavior_data'):
    path = Path(session_path).joinpath(task_collection)
    path = next(path.glob("_iblrig_stimPositionScreen.raw*.csv"), None)

    data = pd.read_csv(path, sep=',', header=None, on_bad_lines='skip')
    data.columns = ['contrast', 'position', 'bns_ts']
    data['bns_ts'] = pd.to_datetime(data['bns_ts'])
    return data


def load_encoder_events(session_path, task_collection='raw_behavior_data', settings=False):
    """
    Load Rotary Encoder (RE) events raw data file.

    Assumes that a folder called "raw_behavior_data" exists in folder.

    Event numbers correspond to the following bpod states:
    1: correct / hide_stim
    2: stim_on
    3: closed_loop
    4: freeze_error / freeze_correct

    >>> data.columns
    >>> ['re_ts',   # Rotary Encoder Timestamp (ms)  'numpy.int64'
         'sm_ev',   # State Machine Event            'numpy.int64'
         'bns_ts']  # Bonsai Timestamp (int)         'pandas.Timestamp'
        # pd.to_datetime(data.bns_ts) to work in datetimes

    :param session_path: Absolute path of session folder
    :type session_path: str
    :return: dataframe w/ 3 cols and (ntrials * 3) lines
    :rtype: Pandas.DataFrame
    """
    if session_path is None:
        return
    path = Path(session_path).joinpath(task_collection)
    path = next(path.glob("_iblrig_encoderEvents.raw*.ssv"), None)
    if not settings:
        settings = load_settings(session_path, task_collection=task_collection)
    if settings is None or not settings.get('IBLRIG_VERSION_TAG'):
        settings = {'IBLRIG_VERSION_TAG': '100.0.0'}
        # auto-detect old files when version is not labeled
        with open(path) as fid:
            line = fid.readline()
        if line.startswith('Event') and 'StateMachine' in line:
            settings = {'IBLRIG_VERSION_TAG': '0.0.0'}
    if not path:
        return None
    if parse_version(settings['IBLRIG_VERSION_TAG']) >= parse_version('5.0.0'):
        return _load_encoder_events_file_ge5(path)
    else:
        return _load_encoder_events_file_lt5(path)
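
# Usage sketch (hypothetical path); the loader picks the pre- or post-5.0 parser based on
# the IBLRIG_VERSION_TAG found in the session settings:
#
#     events = load_encoder_events('/data/Subjects/ZM_1098/2019-01-25/001')
#     # events has 're_ts' and 'sm_ev' columns ('bns_ts' too for pre-5.0 files)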


def _load_encoder_ssv_file(file_path, **kwargs):
    file_path = Path(file_path)
    if file_path.stat().st_size == 0:
        _logger.error(f"{file_path.name} is an empty file. ")
        raise ValueError(f"{file_path.name} is an empty file. ABORT EXTRACTION. ")
    return pd.read_csv(file_path, sep=' ', header=None, on_bad_lines='skip', **kwargs)


def _load_encoder_positions_file_lt5(file_path):
    """
    File loader without the session overhead
    :param file_path:
    :return: dataframe of encoder positions
    """
    data = _load_encoder_ssv_file(file_path,
                                  names=['_', 're_ts', 're_pos', 'bns_ts', '__'],
                                  usecols=['re_ts', 're_pos', 'bns_ts'])
    return _groom_wheel_data_lt5(data, label='_iblrig_encoderPositions.raw.ssv', path=file_path)


def _load_encoder_positions_file_ge5(file_path):
    """
    File loader without the session overhead
    :param file_path:
    :return: dataframe of encoder positions
    """
    data = _load_encoder_ssv_file(file_path,
                                  names=['re_ts', 're_pos', '_'],
                                  usecols=['re_ts', 're_pos'])
    return _groom_wheel_data_ge5(data, label='_iblrig_encoderPositions.raw.ssv', path=file_path)


def _load_encoder_events_file_lt5(file_path):
    """
    File loader without the session overhead
    :param file_path:
    :return: dataframe of encoder events
    """
    data = _load_encoder_ssv_file(file_path,
                                  names=['_', 're_ts', '__', 'sm_ev', 'bns_ts', '___'],
                                  usecols=['re_ts', 'sm_ev', 'bns_ts'])
    return _groom_wheel_data_lt5(data, label='_iblrig_encoderEvents.raw.ssv', path=file_path)


def _load_encoder_events_file_ge5(file_path):
    """
    File loader without the session overhead
    :param file_path:
    :return: dataframe of encoder events
    """
    data = _load_encoder_ssv_file(file_path,
                                  names=['re_ts', 'sm_ev', '_'],
                                  usecols=['re_ts', 'sm_ev'])
    return _groom_wheel_data_ge5(data, label='_iblrig_encoderEvents.raw.ssv', path=file_path)


def load_encoder_positions(session_path, task_collection='raw_behavior_data', settings=False):
    """
    Load Rotary Encoder (RE) positions from raw data file within a session path.

    Assumes that a folder called "raw_behavior_data" exists in folder.
    Positions are RE ticks [-512, 512] == [-180º, 180º]
    0 == trial stim init position
    Positive nums are rightwards movements (mouse) or RE CW (mouse)

    Variable line number, depends on movements.

    Raw datafile Columns:
        Position, RE timestamp, RE Position, Bonsai Timestamp

    Position is always equal to 'Position' so this column was dropped.

    >>> data.columns
    >>> ['re_ts',   # Rotary Encoder Timestamp (ms)    'numpy.int64'
         're_pos',  # Rotary Encoder position (ticks)  'numpy.int64'
         'bns_ts']  # Bonsai Timestamp                 'pandas.Timestamp'
        # pd.to_datetime(data.bns_ts) to work in datetimes

    :param session_path: Absolute path of session folder
    :type session_path: str
    :return: dataframe w/ 3 cols and N positions
    :rtype: Pandas.DataFrame
    """
    if session_path is None:
        return
    path = Path(session_path).joinpath(task_collection)
    path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
    if not settings:
        settings = load_settings(session_path, task_collection=task_collection)
    if settings is None or not settings.get('IBLRIG_VERSION_TAG'):
        settings = {'IBLRIG_VERSION_TAG': '100.0.0'}
        # auto-detect old files when version is not labeled
        with open(path) as fid:
            line = fid.readline()
        if line.startswith('Position'):
            settings = {'IBLRIG_VERSION_TAG': '0.0.0'}
    if not path:
        _logger.warning("No data loaded: could not find raw encoderPositions file")
        return None
    if parse_version(settings['IBLRIG_VERSION_TAG']) >= parse_version('5.0.0'):
        return _load_encoder_positions_file_ge5(path)
    else:
        return _load_encoder_positions_file_lt5(path)
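
# Usage sketch (hypothetical path):
#
#     pos = load_encoder_positions('/data/Subjects/ZM_1098/2019-01-25/001')
#     # pos has 're_ts' and 're_pos' columns ('bns_ts' too for pre-5.0 files)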


def load_encoder_trial_info(session_path, task_collection='raw_behavior_data'):
    """
    Load Rotary Encoder trial info from raw data file.

    Assumes that a folder called "raw_behavior_data" exists in folder.

    NOTE: The last trial's data is probably missing (trial info is sent on trial start
    and data is only saved on trial exit...) max(trialnum) should be N+1 if N
    is the number of trials with saved data.

    Raw datafile Columns:

    >>> data.columns
    >>> ['trial_num',      # Trial Number                     'numpy.int64'
         'stim_pos_init',  # Initial position of visual stim  'numpy.int64'
         'stim_contrast',  # Contrast of visual stimulus      'numpy.float64'
         'stim_freq',      # Frequency of gabor patch         'numpy.float64'
         'stim_angle',     # Angle of Gabor 0 = Vertical      'numpy.float64'
         'stim_gain',      # Wheel gain (mm/º of stim)        'numpy.float64'
         'stim_sigma',     # Size of patch                    'numpy.float64'
         'stim_phase',     # Phase of gabor                   'numpy.float64'
         'bns_ts']         # Bonsai Timestamp                 'pandas.Timestamp'
        # pd.to_datetime(data.bns_ts) to work in datetimes

    :param session_path: Absolute path of session folder
    :type session_path: str
    :return: dataframe w/ 9 cols and ntrials lines
    :rtype: Pandas.DataFrame
    """
    if session_path is None:
        return
    path = Path(session_path).joinpath(task_collection)
    path = next(path.glob("_iblrig_encoderTrialInfo.raw*.ssv"), None)
    if not path:
        return None
    data = pd.read_csv(path, sep=' ', header=None)
    data = data.drop([9], axis=1)
    data.columns = ['trial_num', 'stim_pos_init', 'stim_contrast', 'stim_freq',
                    'stim_angle', 'stim_gain', 'stim_sigma', 'stim_phase', 'bns_ts']
    # return _groom_wheel_data_lt5(data, label='_iblrig_encoderEvents.raw.ssv', path=path)
    return data


def load_ambient_sensor(session_path, task_collection='raw_behavior_data'):
    """
    Load Ambient Sensor data from session.

    Probably could be extracted to DatasetTypes:
    _ibl_trials.temperature_C, _ibl_trials.airPressure_mb,
    _ibl_trials.relativeHumidity
    Returns a list of dicts, one dict per trial.
    dict keys are:
    dict_keys(['Temperature_C', 'AirPressure_mb', 'RelativeHumidity'])

    :param session_path: Absolute path of session folder
    :type session_path: str
    :return: list of dicts
    :rtype: list
    """
    if session_path is None:
        return
    path = Path(session_path).joinpath(task_collection)
    path = next(path.glob("_iblrig_ambientSensorData.raw*.jsonable"), None)
    if not path:
        return None
    data = []
    with open(path, 'r') as f:
        for line in f:
            data.append(json.loads(line))
    return data


def load_mic(session_path, task_collection='raw_behavior_data'):
    """
    Load Microphone wav file to np.array of len nSamples

    :param session_path: Absolute path of session folder
    :type session_path: str
    :return: An array of values of the sound waveform
    :rtype: numpy.array
    """
    if session_path is None:
        return
    path = Path(session_path).joinpath(task_collection)
    path = next(path.glob("_iblrig_micData.raw*.wav"), None)
    if not path:
        return None
    fp = wave.open(path)
    nchan = fp.getnchannels()
    N = fp.getnframes()
    dstr = fp.readframes(N * nchan)
    data = np.frombuffer(dstr, np.int16)
    data = np.reshape(data, (-1, nchan))
    return data


def _clean_wheel_dataframe(data, label, path):
    if np.any(data.isna()):
        _logger.warning(label + ' has missing/incomplete records \n %s', path)
    # first step is to re-interpret as numeric objects if not already done
    for col in data.columns:
        if data[col].dtype == object and col not in ['bns_ts']:
            data[col] = pd.to_numeric(data[col], errors='coerce')
    # then drop Nans and duplicates
    data.dropna(inplace=True)
    data.drop_duplicates(keep='first', inplace=True)
    data.reset_index(inplace=True)
    # handle the clock resets when microseconds exceed uint32 max value
    drop_first = False
    data['re_ts'] = data['re_ts'].astype(np.double, copy=False)
    if any(np.diff(data['re_ts']) < 0):
        ind = np.where(np.diff(data['re_ts']) < 0)[0]
        for i in ind:
            # the first sample may be corrupt, in this case throw away
            if i <= 1:
                drop_first = i
                _logger.warning(label + ' rotary encoder positions timestamps'
                                        ' first sample corrupt ' + str(path))
            # if it's an uint32 wraparound, the diff should be close to 2 ** 32
            elif 32 - np.log2(data['re_ts'][i] - data['re_ts'][i + 1]) < 0.2:
                data.loc[i + 1:, 're_ts'] = data.loc[i + 1:, 're_ts'] + 2 ** 32
            # there is also the case where 2 positions are swapped and need to be swapped back
            elif data['re_ts'][i] > data['re_ts'][i + 1] > data['re_ts'][i - 1]:
                _logger.warning(label + ' rotary encoder timestamps swapped at index: ' +
                                str(i) + ' ' + str(path))
                a, b = data.iloc[i].copy(), data.iloc[i + 1].copy()
                data.iloc[i], data.iloc[i + 1] = b, a
            # if none of those 3 cases apply, raise an error
            else:
                _logger.error(label + ' Rotary encoder timestamps are not sorted.' + str(path))
                data.sort_values('re_ts', inplace=True)
                data.reset_index(inplace=True)
    if drop_first is not False:
        data.drop(data.loc[:drop_first].index, inplace=True)
        data = data.reindex()
    return data


def _groom_wheel_data_lt5(data, label='file ', path=''):
    """
    The whole purpose of this function is to account for variability and corruption in
    the wheel position files. There are many possible errors described below, but
    nothing excludes getting new ones.
    """
    data = _clean_wheel_dataframe(data, label, path)
    data.drop(data.loc[data.bns_ts.apply(len) != 33].index, inplace=True)
    # check if the time scale is in ms
    sess_len_sec = (datetime.strptime(data['bns_ts'].iloc[-1][:25], '%Y-%m-%dT%H:%M:%S.%f') -
                    datetime.strptime(data['bns_ts'].iloc[0][:25], '%Y-%m-%dT%H:%M:%S.%f')).seconds
    if data['re_ts'].iloc[-1] / (sess_len_sec + 1e-6) < 1e5:  # should be 1e6 normally
        _logger.warning('Rotary encoder reset logs events in ms instead of us: ' +
                        'RE firmware needs upgrading and wheel velocity is potentially inaccurate')
        data['re_ts'] = data['re_ts'] * 1000
    return data


def _groom_wheel_data_ge5(data, label='file ', path=''):
    """
    The whole purpose of this function is to account for variability and corruption in
    the wheel position files. There are many possible errors described below, but
    nothing excludes getting new ones.
    """
    data = _clean_wheel_dataframe(data, label, path)
    # check if the time scale is in ms
    if (data['re_ts'].iloc[-1] - data['re_ts'].iloc[0]) / 1e6 < 20:
        _logger.warning('Rotary encoder reset logs events in ms instead of us: ' +
                        'RE firmware needs upgrading and wheel velocity is potentially inaccurate')
        data['re_ts'] = data['re_ts'] * 1000
    return data


def save_bool(save, dataset_type):
    if isinstance(save, bool):
        out = save
    elif isinstance(save, list):
        out = (dataset_type in save) or (Path(dataset_type).stem in save)
    if out:
        _logger.debug('extracting' + dataset_type)
    return out


def sync_trials_robust(t0, t1, diff_threshold=0.001, drift_threshold_ppm=200, max_shift=5,
                       return_index=False):
    """
    Attempts to find matching timestamps in 2 time-series that have an offset, are drifting,
    and are most likely incomplete: sizes don't have to match, some pulses may be missing
    in any series.
    Only works with irregular time series as it relies on the derivative to match sync.
    :param t0:
    :param t1:
    :param diff_threshold:
    :param drift_threshold_ppm: (200)
    :param max_shift: (5)
    :param return_index: (False)
    :return:
    """
    nsync = min(t0.size, t1.size)
    dt0 = np.diff(t0)
    dt1 = np.diff(t1)
    ind = np.zeros_like(dt0) * np.nan
    i0 = 0
    i1 = 0
    cdt = np.nan  # the current time difference between the two series to compute drift
    while i0 < (nsync - 1):
        # look in the next max_shift events the ones whose derivative match
        isearch = np.arange(i1, min(max_shift + i1, dt1.size))
        dec = np.abs(dt0[i0] - dt1[isearch]) < diff_threshold
        # another constraint is to check the dt for the maximum drift
        if ~np.isnan(cdt):
            drift_ppm = np.abs((cdt - (t0[i0] - t1[isearch])) / dt1[isearch]) * 1e6
            dec = np.logical_and(dec, drift_ppm <= drift_threshold_ppm)
        # if one is found
        if np.any(dec):
            ii1 = np.where(dec)[0][0]
            ind[i0] = i1 + ii1
            i1 += ii1 + 1
            cdt = t0[i0 + 1] - t1[i1 + ii1]
        i0 += 1
    it0 = np.where(~np.isnan(ind))[0]
    it1 = ind[it0].astype(int)
    ind0 = np.unique(np.r_[it0, it0 + 1])
    ind1 = np.unique(np.r_[it1, it1 + 1])
    if return_index:
        return t0[ind0], t1[ind1], ind0, ind1
    else:
        return t0[ind0], t1[ind1]
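
# Minimal sketch with made-up pulse trains: t1 is t0 shifted by a constant offset with
# one pulse missing, so the matcher recovers the common pulses from the derivatives:
#
#     t0 = np.cumsum(np.array([0.5, 1.1, 0.7, 1.3, 0.9, 1.0]))
#     t1 = np.delete(t0, 2) + 2.0  # same train, one pulse dropped, offset by 2 s
#     m0, m1 = sync_trials_robust(t0, t1)
#     # m0 and m1 have equal length and m1 - m0 is constant (2.0) for the matched pulses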


def load_bpod_fronts(session_path: str, data: list = False, task_collection: str = 'raw_behavior_data') -> list:
    """load_bpod_fronts
    Loads BNC1 and BNC2 bpod channel times and polarities from session_path

    :param session_path: a valid session_path
    :type session_path: str
    :param data: pre-loaded raw data dict, defaults to False
    :type data: list, optional
    :param task_collection: collection containing the raw task data, defaults to 'raw_behavior_data'
    :type task_collection: str, optional
    :return: List of dicts BNC1 and BNC2 {"times": np.array, "polarities": np.array}
    :rtype: list
    """
    if not data:
        data = load_data(session_path, task_collection)

    BNC1_fronts = np.array([[np.nan, np.nan]])
    BNC2_fronts = np.array([[np.nan, np.nan]])
    for tr in data:
        BNC1_fronts = np.append(
            BNC1_fronts,
            np.array(
                [
                    [x, 1]
                    for x in tr["behavior_data"]["Events timestamps"].get("BNC1High", [np.nan])
                ]
            ),
            axis=0,
        )
        BNC1_fronts = np.append(
            BNC1_fronts,
            np.array(
                [
                    [x, -1]
                    for x in tr["behavior_data"]["Events timestamps"].get("BNC1Low", [np.nan])
                ]
            ),
            axis=0,
        )
        BNC2_fronts = np.append(
            BNC2_fronts,
            np.array(
                [
                    [x, 1]
                    for x in tr["behavior_data"]["Events timestamps"].get("BNC2High", [np.nan])
                ]
            ),
            axis=0,
        )
        BNC2_fronts = np.append(
            BNC2_fronts,
            np.array(
                [
                    [x, -1]
                    for x in tr["behavior_data"]["Events timestamps"].get("BNC2Low", [np.nan])
                ]
            ),
            axis=0,
        )

    BNC1_fronts = BNC1_fronts[1:, :]
    BNC1_fronts = BNC1_fronts[BNC1_fronts[:, 0].argsort()]
    BNC2_fronts = BNC2_fronts[1:, :]
    BNC2_fronts = BNC2_fronts[BNC2_fronts[:, 0].argsort()]

    BNC1 = {"times": BNC1_fronts[:, 0], "polarities": BNC1_fronts[:, 1]}
    BNC2 = {"times": BNC2_fronts[:, 0], "polarities": BNC2_fronts[:, 1]}

    return [BNC1, BNC2]


def get_port_events(trial: dict, name: str = '') -> list:
    """get_port_events
    Return all event timestamps from bpod raw data trial that match 'name'
    --> looks in trial['behavior_data']['Events timestamps']

    :param trial: raw trial dict
    :type trial: dict
    :param name: name of event, defaults to ''
    :type name: str, optional
    :return: Sorted list of event timestamps
    :rtype: list
    TODO: add polarities?
    """
    out: list = []
    events = trial['behavior_data']['Events timestamps']
    for k in events:
        if name in k:
            out.extend(events[k])
    out = sorted(out)

    return out
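
# Usage sketch on a single loaded trial (assumes `trials = load_data(...)` succeeded):
#
#     port1_times = get_port_events(trials[0], name='Port1')
#     # matches e.g. 'Port1In' and 'Port1Out' events, returned as one sorted list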


def load_widefield_mmap(session_path, dtype=np.uint16, shape=(540, 640), n_frames=None, mode='r'):
    """
    TODO Document this function

    Parameters
    ----------
    session_path

    Returns
    -------

    """
    filepath = Path(session_path).joinpath('raw_widefield_data').glob('widefield.raw.*.dat')
    filepath = next(filepath, None)
    if not filepath:
        _logger.warning("No data loaded: could not find raw data file")
        return None

    if type(dtype) is str:
        dtype = np.dtype(dtype)
    if n_frames is None:
        # Get the number of samples from the file size
        n_frames = int(filepath.stat().st_size / (np.prod(shape) * dtype.itemsize))
    return np.memmap(str(filepath), mode=mode, dtype=dtype, shape=(int(n_frames), *shape))


def patch_settings(session_path, collection='raw_behavior_data',
                   new_collection=None, subject=None, number=None, date=None):
    """Modify various details in a settings file.

    This function makes it easier to change things like the subject name in a settings file, as it
    will modify the subject name in the myriad paths. NB: This saves the settings into the same
    location it was loaded from.

    Parameters
    ----------
    session_path : str, pathlib.Path
        The session path containing the settings file.
    collection : str
        The subfolder containing the settings file.
    new_collection : str
        An optional new subfolder to change in the settings paths.
    subject : str
        An optional new subject name to change in the settings.
    number : str, int
        An optional new number to change in the settings.
    date : str, datetime.date
        An optional date to change in the settings.

    Returns
    -------
    dict
        The modified settings.
    """
    settings = load_settings(session_path, collection)
    if not settings:
        raise IOError('Settings file not found')

    filename = PureWindowsPath(settings['SETTINGS_FILE_PATH']).name
    file_path = Path(session_path).joinpath(collection, filename)

    if subject:
        # Patch subject name
        old_subject = settings['SUBJECT_NAME']
        settings['SUBJECT_NAME'] = subject
        for k in settings.keys():
            if isinstance(settings[k], str):
                settings[k] = settings[k].replace(f'\\Subjects\\{old_subject}', f'\\Subjects\\{subject}')
        settings['SESSION_NAME'] = '\\'.join([subject, *settings['SESSION_NAME'].split('\\')[1:]])
        settings.pop('PYBPOD_SUBJECT_EXTRA', None)  # Get rid of Alyx subject info

    if date:
        # Patch session datetime
        date = str(date)
        old_date = settings['SESSION_DATE']
        settings['SESSION_DATE'] = date
        for k in settings.keys():
            if isinstance(settings[k], str):
                settings[k] = settings[k].replace(
                    f'\\{settings["SUBJECT_NAME"]}\\{old_date}',
                    f'\\{settings["SUBJECT_NAME"]}\\{date}'
                )
        settings['SESSION_DATETIME'] = date + settings['SESSION_DATETIME'][10:]

    if number:
        # Patch session number
        old_number = settings['SESSION_NUMBER']
        if isinstance(number, int):
            number = f'{number:03}'
        settings['SESSION_NUMBER'] = number
        for k in settings.keys():
            if isinstance(settings[k], str):
                settings[k] = settings[k].replace(
                    f'\\{settings["SESSION_DATE"]}\\{old_number}',
                    f'\\{settings["SESSION_DATE"]}\\{number}'
                )

    if new_collection:
        old_path = settings['SESSION_RAW_DATA_FOLDER']
        new_path = PureWindowsPath(settings['SESSION_RAW_DATA_FOLDER']).with_name(new_collection)
        for k in settings.keys():
            if isinstance(settings[k], str):
                settings[k] = settings[k].replace(old_path, str(new_path))
    with open(file_path, 'w') as fp:
        json.dump(settings, fp, indent=' ')
    return settings
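
# Usage sketch (path and names are hypothetical); note this rewrites the settings
# file on disk in place:
#
#     settings = patch_settings('/data/Subjects/ZM_1098/2019-01-25/001',
#                               subject='ZM_1099', number=2)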