Coverage for ibllib/qc/camera.py: 78%

666 statements  

coverage.py v7.5.4, created at 2024-07-08 17:16 +0100

1"""Video quality control. 

2 

3This module runs a list of quality control metrics on the camera and extracted video data. 

4 

5Examples 

6-------- 

7Run right camera QC, downloading all but video file 

8 

9>>> qc = CameraQC(eid, 'right', download_data=True, stream=True) 

10>>> qc.run() 

11 

12Run left camera QC with session path, update QC field in Alyx 

13 

14>>> qc = CameraQC(session_path, 'left') 

15>>> outcome, extended = qc.run(update=True) # Returns outcome of videoQC only 

16>>> print(f'video QC = {outcome}; overall session QC = {qc.outcome}') # NB difference outcomes 

17 

18Run only video QC (no timestamp/alignment checks) on 20 frames for the body camera 

19 

20>>> qc = CameraQC(eid, 'body', n_samples=20) 

21>>> qc.load_video_data() # Quicker than loading all data 

22>>> qc.run() 

23 

24Run specific video QC check and display the plots 

25 

26>>> qc = CameraQC(eid, 'left') 

27>>> qc.load_data(download_data=True) 

28>>> qc.check_position(display=True) # NB: Not all checks make plots 

29 

30Run the QC for all cameras 

31 

32>>> qcs = run_all_qc(eid) 

33>>> qcs['left'].metrics # Dict of checks and outcomes for left camera 

34""" 

import logging
from inspect import getmembers, isfunction
from pathlib import Path
from itertools import chain
import copy

import cv2
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.patches import Rectangle

import one.alf.io as alfio
from one.util import filter_datasets
from one.alf import spec
from one.alf.exceptions import ALFObjectNotFound
from iblutil.util import Bunch
from iblutil.numerical import within_ranges

from ibllib.io.extractors.camera import extract_camera_sync, extract_all
from ibllib.io.extractors import ephys_fpga, training_wheel, mesoscope
from ibllib.io.extractors.video_motion import MotionAlignment
from ibllib.io.extractors.base import get_session_extractor_type
from ibllib.io import raw_data_loaders as raw
from ibllib.io.raw_daq_loaders import load_timeline_sync_and_chmap
from ibllib.io.session_params import read_params, get_sync, get_sync_namespace
import brainbox.behavior.wheel as wh
from ibllib.io.video import get_video_meta, get_video_frames_preload, assert_valid_label
from . import base

_log = logging.getLogger(__name__)

try:
    from labcams import parse_cam_log
except ImportError:
    _log.warning('labcams not installed')


class CameraQC(base.QC):
    """A class for computing camera QC metrics."""

    dstypes = [
        '_ibl_experiment.description',
        '_iblrig_Camera.frameData',  # Replaces the next 3 datasets
        '_iblrig_Camera.frame_counter',
        '_iblrig_Camera.GPIO',
        '_iblrig_Camera.timestamps',
        '_iblrig_taskData.raw',
        '_iblrig_taskSettings.raw',
        '_iblrig_Camera.raw',
        'camera.times',
        'wheel.position',
        'wheel.timestamps'
    ]
    dstypes_fpga = [
        '_spikeglx_sync.channels',
        '_spikeglx_sync.polarities',
        '_spikeglx_sync.times',
        'ephysData.raw.meta'
    ]
    """Recall that for the training rig there is only one side camera at 30 Hz and 1280 x 1024 px.
    For the recording rig there are two side cameras (left: 60 Hz, 1280 x 1024 px;
    right: 150 Hz, 640 x 512 px) and one body camera (30 Hz, 640 x 512 px)."""
    video_meta = {
        'training': {
            'left': {
                'fps': 30,
                'width': 1280,
                'height': 1024
            }
        },
        'ephys': {
            'left': {
                'fps': 60,
                'width': 1280,
                'height': 1024
            },
            'right': {
                'fps': 150,
                'width': 640,
                'height': 512
            },
            'body': {
                'fps': 30,
                'width': 640,
                'height': 512
            },
        }
    }

    def __init__(self, session_path_or_eid, camera, **kwargs):
        """
        Compute and view camera QC metrics.

        :param session_path_or_eid: A session id or path
        :param camera: The camera to run QC on, if None QC is run for all three cameras
        :param n_samples: The number of frames to sample for the position and brightness QC
        :param stream: If true and local video files are not available, the data are streamed
            from the remote source.
        :param log: A logging.Logger instance, if None the 'ibllib' logger is used
        :param one: An ONE instance for fetching and setting the QC on Alyx
        """
        # When an eid is provided, we will download the required data by default (if necessary)
        download_data = not spec.is_session_path(Path(session_path_or_eid))
        self.download_data = kwargs.pop('download_data', download_data)
        self.stream = kwargs.pop('stream', None)
        self.n_samples = kwargs.pop('n_samples', 100)
        self.sync_collection = kwargs.pop('sync_collection', None)
        self.sync = kwargs.pop('sync_type', None)
        super().__init__(session_path_or_eid, **kwargs)

        # Data
        self.label = assert_valid_label(camera)
        filename = f'_iblrig_{self.label}Camera.raw*.mp4'
        raw_video_path = self.session_path.joinpath('raw_video_data')
        self.video_path = next(raw_video_path.glob(filename), None)

        # If local video doesn't exist, change video path to URL
        if not self.video_path and self.stream is not False and self.one is not None:
            try:
                self.stream = True
                self.video_path = self.one.path2url(raw_video_path / filename.replace('*', ''))
            except (StopIteration, ALFObjectNotFound):
                _log.error('No remote or local video file found')
                self.video_path = None

        logging.disable(logging.NOTSET)
        keys = ('count', 'pin_state', 'audio', 'fpga_times', 'wheel', 'video',
                'frame_samples', 'timestamps', 'camera_times', 'bonsai_times')
        self.data = Bunch.fromkeys(keys)
        self.frame_samples_idx = None

        # QC outcomes map
        self.metrics = None
        self.outcome = spec.QC.NOT_SET

        # Specify any checks to remove
        self.checks_to_remove = []
        self._type = None

    @property
    def type(self):
        """
        Return the camera type based on the protocol.

        :return: None, 'ephys' or 'training'
        """
        if not self._type:
            return
        else:
            return 'ephys' if 'ephys' in self._type else 'training'

    def load_data(self, download_data: bool = None, extract_times: bool = False, load_video: bool = True) -> None:
        """Extract the data from raw data files.

        Extracts all the required task data from the raw data files.

        Data keys:
            - count (int array): the sequential frame number (n, n+1, n+2...)
            - pin_state (): the camera GPIO pin; records the audio TTLs; should be one per frame
            - audio (float array): timestamps of audio TTL fronts
            - fpga_times (float array): timestamps of camera TTLs recorded by FPGA
            - timestamps (float array): extracted video timestamps (the camera.times ALF)
            - bonsai_times (datetime array): system timestamps of video PC; should be one per frame
            - camera_times (float array): camera frame timestamps extracted from frame headers
            - wheel (Bunch): rotary encoder timestamps, position and period used for wheel motion
            - video (Bunch): video meta data, including dimensions and FPS
            - frame_samples (h x w x n array): array of evenly sampled frames (1 colour channel)

        :param download_data: if True, any missing raw data is downloaded via ONE.
            Missing data will raise an AssertionError
        :param extract_times: if True, the camera.times are re-extracted from the raw data
        :param load_video: if True, calls the load_video_data method
        """
        assert self.session_path, 'no session path set'
        if download_data is not None:
            self.download_data = download_data
        if self.download_data and self.eid and self.one and not self.one.offline:
            self.ensure_required_data()
        _log.info('Gathering data for QC')

        # Get frame count and pin state
        self.data['count'], self.data['pin_state'] = \
            raw.load_embedded_frame_data(self.session_path, self.label, raw=True)

        # If there is an experiment description and there are video parameters
        sess_params = read_params(self.session_path) or {}
        task_collection = get_task_collection(sess_params)
        ns = get_sync_namespace(sess_params)
        self._set_sync(sess_params)
        if not self.sync:
            if not self.type:
                self._type = get_session_extractor_type(self.session_path, task_collection)
            self.sync = 'nidq' if 'ephys' in self.type else 'bpod'
        self._update_meta_from_session_params(sess_params)

        # Load the audio and raw FPGA times
        if self.sync != 'bpod' and self.sync is not None:
            self.sync_collection = self.sync_collection or 'raw_ephys_data'
            ns = ns or 'spikeglx'
            if ns == 'spikeglx':
                sync, chmap = ephys_fpga.get_sync_and_chn_map(self.session_path, self.sync_collection)
            elif ns == 'timeline':
                sync, chmap = load_timeline_sync_and_chmap(self.session_path / self.sync_collection)
            else:
                raise NotImplementedError(f'Unknown namespace "{ns}"')
            audio_ttls = ephys_fpga.get_sync_fronts(sync, chmap['audio'])
            self.data['audio'] = audio_ttls['times']  # Get rises
            # Load raw FPGA times
            cam_ts = extract_camera_sync(sync, chmap)
            self.data['fpga_times'] = cam_ts[self.label]
        else:
            self.sync_collection = self.sync_collection or task_collection
            bpod_data = raw.load_data(self.session_path, task_collection)
            _, audio_ttls = raw.load_bpod_fronts(
                self.session_path, data=bpod_data, task_collection=task_collection)
            self.data['audio'] = audio_ttls['times']

        # Load extracted frame times
        alf_path = self.session_path / 'alf'
        try:
            assert not extract_times
            self.data['timestamps'] = alfio.load_object(
                alf_path, f'{self.label}Camera', short_keys=True)['times']
        except AssertionError:  # Re-extract
            kwargs = dict(video_path=self.video_path, labels=self.label)
            if self.sync != 'bpod' and self.sync is not None:
                kwargs = {**kwargs, 'sync': sync, 'chmap': chmap}  # noqa
            outputs, _ = extract_all(self.session_path, self.sync, save=False,
                                     sync_collection=self.sync_collection, **kwargs)
            self.data['timestamps'] = outputs[f'{self.label}_camera_timestamps']
        except ALFObjectNotFound:
            _log.warning('no camera.times ALF found for session')

        # Get audio and wheel data
        wheel_keys = ('timestamps', 'position')
        try:
            # glob in case wheel data are in sub-collections
            alf_path = next(alf_path.rglob('*wheel.timestamps*')).parent
            self.data['wheel'] = alfio.load_object(alf_path, 'wheel', short_keys=True)
        except (StopIteration, ALFObjectNotFound):
            # Extract from raw data
            if self.sync != 'bpod' and self.sync is not None:
                if ns == 'spikeglx':
                    wheel_data = ephys_fpga.extract_wheel_sync(sync, chmap)
                elif ns == 'timeline':
                    extractor = mesoscope.TimelineTrials(self.session_path, sync_collection=self.sync_collection)
                    wheel_data = extractor.extract_wheel_sync()
                else:
                    raise NotImplementedError(f'Unknown namespace "{ns}"')
            else:
                wheel_data = training_wheel.get_wheel_position(
                    self.session_path, task_collection=task_collection)
            self.data['wheel'] = Bunch(zip(wheel_keys, wheel_data))

        # Find short period of wheel motion for motion correlation.
        if data_for_keys(wheel_keys, self.data['wheel']) and self.data['timestamps'] is not None:
            self.data['wheel'].period = self.get_active_wheel_period(self.data['wheel'])

        # Load Bonsai frame timestamps
        try:
            ssv_times = raw.load_camera_ssv_times(self.session_path, self.label)
            self.data['bonsai_times'], self.data['camera_times'] = ssv_times
        except AssertionError:
            _log.warning('No Bonsai video timestamps file found')

        # Gather information from video file
        if load_video:
            _log.info('Inspecting video file...')
            self.load_video_data()
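    # Usage sketch (illustrative, not part of the class): timestamp-only checks can skip
    # the expensive video inspection by passing load_video=False; `session_path` is a
    # placeholder here.
    # >>> qc = CameraQC(session_path, 'left')
    # >>> qc.load_data(load_video=False)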

    def load_video_data(self):
        """Get basic properties of video.

        Updates the `data` property with video metadata such as length and frame count, as well
        as loading some frames to perform QC on.
        """
        try:
            self.data['video'] = get_video_meta(self.video_path, one=self.one)
            # Sample some frames from the video file
            indices = np.linspace(100, self.data['video'].length - 100, self.n_samples).astype(int)
            self.frame_samples_idx = indices
            self.data['frame_samples'] = get_video_frames_preload(self.video_path, indices,
                                                                  mask=np.s_[:, :, 0])
        except AssertionError:
            _log.error('Failed to read video file; setting outcome to CRITICAL')
            self._outcome = spec.QC.CRITICAL

    @staticmethod
    def get_active_wheel_period(wheel, duration_range=(3., 20.), display=False):
        """
        Find a period of active wheel movement.

        Attempts to find a period of movement where the wheel accelerates and decelerates for
        the wheel motion alignment QC.

        :param wheel: A Bunch of wheel timestamps and position data
        :param duration_range: The candidates must be within this min/max duration range
        :param display: If true, plot the selected wheel movement
        :return: 2-element array comprising the start and end times of the active period
        """
        pos, ts = wh.interpolate_position(wheel.timestamps, wheel.position)
        v, acc = wh.velocity_filtered(pos, 1000)
        on, off, *_ = wh.movements(ts, acc, pos_thresh=.1, make_plots=False)
        edges = np.c_[on, off]
        indices, _ = np.where(np.logical_and(
            np.diff(edges) > duration_range[0], np.diff(edges) < duration_range[1]))
        if len(indices) == 0:
            _log.warning('No period of wheel movement found for motion alignment.')
            return None
        # Pick a movement somewhere in the middle
        i = indices[int(indices.size / 2)]
        if display:
            _, (ax0, ax1) = plt.subplots(2, 1, sharex='all')
            mask = np.logical_and(ts > edges[i][0], ts < edges[i][1])
            ax0.plot(ts[mask], pos[mask])
            ax1.plot(ts[mask], acc[mask])
        return edges[i]
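    # Hedged example of the expected input and output (values are hypothetical):
    # >>> wheel = Bunch(timestamps=ts, position=pos)  # a rotary encoder trace
    # >>> CameraQC.get_active_wheel_period(wheel)
    # array([104.3, 108.9])  # start/end times (s) of the chosen movement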

    def ensure_required_data(self):
        """Ensure the datasets required for QC are local.

        If the download_data attribute is True, any missing data are downloaded. If all the data
        are not present locally at the end of it an exception is raised. If the stream attribute
        is True, the video file is not required to be local, however it must be remotely
        accessible.
        NB: Requires a valid instance of ONE and a valid session eid.

        Raises
        ------
        AssertionError
            The data required for complete QC are not present.
        """
        assert self.one is not None, 'ONE required to download data'

        sess_params = {}
        if self.download_data:
            dset = self.one.list_datasets(self.session_path, '*experiment.description*', details=True)
            if self.one._check_filesystem(dset):
                sess_params = read_params(self.session_path) or {}
        else:
            sess_params = read_params(self.session_path) or {}
        self._set_sync(sess_params)

        # Get extractor type
        is_ephys = 'ephys' in (self.type or self.one.get_details(self.eid)['task_protocol'])
        self.sync = self.sync or ('nidq' if is_ephys else 'bpod')

        is_fpga = 'bpod' not in self.sync

        # Dataset collections outside this list are ignored (e.g. probe00, raw_passive_data)
        collections = (
            'alf', '', get_task_collection(sess_params), get_video_collection(sess_params, self.label)
        )
        dtypes = self.dstypes + self.dstypes_fpga if is_fpga else self.dstypes
        assert_unique = True
        # Check we have raw ephys data for session
        if is_ephys:
            if len(self.one.list_datasets(self.eid, collection='raw_ephys_data')) == 0:
                # Assert 3A probe model; if so download all probe data
                det = self.one.get_details(self.eid, full=True)
                probe_model = next(x['model'] for x in det['probe_insertion'])
                assert probe_model == '3A', 'raw ephys data missing'
                collections += (self.sync_collection or 'raw_ephys_data',)
                if sess_params:
                    probes = sess_params.get('devices', {}).get('neuropixel', {})
                    probes = set(x.get('collection') for x in chain(*map(dict.values, probes)))
                    collections += tuple(probes)
                else:
                    collections += ('raw_ephys_data/probe00', 'raw_ephys_data/probe01')
                assert_unique = False
            else:
                # 3B probes have data in root collection
                collections += ('raw_ephys_data',)
        for dstype in dtypes:
            datasets = self.one.type2datasets(self.eid, dstype, details=True)
            if 'camera' in dstype.lower():  # Download individual camera file
                datasets = filter_datasets(datasets, filename=f'.*{self.label}.*')
            else:  # Ignore probe datasets, etc.
                _datasets = filter_datasets(datasets, collection=collections, assert_unique=assert_unique)
                if '' in collections:  # Must be handled as a separate query
                    datasets = filter_datasets(datasets, collection='', assert_unique=assert_unique)
                    datasets = pd.concat([datasets, _datasets]).sort_index()
                else:
                    datasets = _datasets

            if any(x.endswith('.mp4') for x in datasets.rel_path) and self.stream:
                names = [x.split('/')[-1] for x in self.one.list_datasets(self.eid, details=False)]
                assert f'_iblrig_{self.label}Camera.raw.mp4' in names, 'No remote video file found'
                continue
            optional = ('camera.times', '_iblrig_Camera.raw', 'wheel.position', 'wheel.timestamps',
                        '_iblrig_Camera.timestamps', '_iblrig_Camera.frame_counter', '_iblrig_Camera.GPIO',
                        '_iblrig_Camera.frameData', '_ibl_experiment.description')
            present = (
                self.one._check_filesystem(datasets)
                if self.download_data
                else (next(self.session_path.rglob(d), None) for d in datasets['rel_path'])
            )

            required = (dstype not in optional)
            all_present = not datasets.empty and all(present)
            assert all_present or not required, f'Dataset {dstype} not found'

        if not self.type and self.sync != 'nidq':
            self._type = get_session_extractor_type(self.session_path)

    def _set_sync(self, session_params=False):
        """Set the sync and sync_collection attributes if not already set.

        Also sets the type attribute based on the sync. NB: The type attribute is for legacy
        sessions.

        Parameters
        ----------
        session_params : dict, bool
            The loaded experiment description file. If False, attempts to load it from the
            session_path.
        """
        if session_params is False:
            if not self.session_path:
                raise ValueError('No session path set')
            session_params = read_params(self.session_path)
        sync, sync_dict = get_sync(session_params)
        self.sync = self.sync or sync
        self.sync_collection = self.sync_collection or sync_dict.get('collection')
        if self.sync:
            self._type = 'ephys' if self.sync == 'nidq' else 'training'

    def _update_meta_from_session_params(self, sess_params):
        """
        Update the default expected video properties.

        Uses properties defined in the experiment description file, if present. This updates the
        `video_meta` property with the fps, width and height for the type and camera label.

        Parameters
        ----------
        sess_params : dict
            The loaded experiment description file.
        """
        try:
            assert sess_params
            video_pars = sess_params.get('devices', {}).get('cameras', {}).get(self.label)
        except (AssertionError, KeyError):
            return
        PROPERTIES = ('width', 'height', 'fps')
        video_meta = copy.deepcopy(self.video_meta)  # must re-assign as it's a class attribute
        if self.type not in video_meta:
            video_meta[self.type] = {}
        if self.label not in video_meta[self.type]:
            video_meta[self.type][self.label] = {}
        video_meta[self.type][self.label].update(
            **{k: v for k, v in video_pars.items() if k in PROPERTIES}
        )
        self.video_meta = video_meta

    def run(self, update: bool = False, **kwargs) -> (str, dict):
        """
        Run video QC checks and return the overall outcome.

        :param update: if True, updates the session QC fields on Alyx
        :param download_data: if True, downloads any missing data if required
        :param extract_times: if True, re-extracts the camera timestamps from the raw data
        :returns: the overall outcome, and a dict of checks and their outcomes
        """
        _log.info(f'Computing QC outcome for {self.label} camera, session {self.eid}')
        namespace = f'video{self.label.capitalize()}'
        if all(x is None for x in self.data.values()):
            self.load_data(**kwargs)
        if self.data['frame_samples'] is None or self.data['timestamps'] is None:
            return spec.QC.NOT_SET, {}
        if self.data['timestamps'].shape[0] == 0:
            _log.error(f'No timestamps for {self.label} camera; setting outcome to CRITICAL')
            return spec.QC.CRITICAL, {}

        def is_metric(x):
            return isfunction(x) and x.__name__.startswith('check_')

        checks = getmembers(self.__class__, is_metric)
        checks = self._remove_check(checks)
        self.metrics = {f'_{namespace}_' + k[6:]: fn(self) for k, fn in checks}

        values = [x if isinstance(x, spec.QC) else x[0] for x in self.metrics.values()]
        outcome = max(map(spec.QC.validate, values))

        if update:
            extended = {
                k: spec.QC.NOT_SET if v is None else v
                for k, v in self.metrics.items()
            }
            self.update_extended_qc(extended)
            self.update(outcome, namespace)
        return outcome, self.metrics

    def _remove_check(self, checks):
        """
        Remove one or more check functions from the QC checklist.

        Parameters
        ----------
        checks : list of tuple
            The complete list of check function names and handles.

        Returns
        -------
        list of tuple
            The list of check function names and handles, sans names in the `checks_to_remove`
            property.
        """
        if len(self.checks_to_remove) == 0:
            return checks
        else:
            for check in self.checks_to_remove:
                check_names = [ch[0] for ch in checks]
                idx = check_names.index(check)
                checks.pop(idx)
            return checks

    def check_brightness(self, bounds=(40, 200), max_std=20, roi=True, display=False):
        """Check that the video brightness is within a given range.

        The mean brightness of each frame must be within the bounds provided, and the standard
        deviation across sample frames should be less than the given value. Assumes that the
        frame samples are 2D (no colour channels).

        :param bounds: For each frame, check that: bounds[0] < M < bounds[1],
            where M = mean(frame). If fewer than 75% of sample frames fall within these bounds,
            the outcome is WARNING. If fewer than 75% fall within twice the bounds, the outcome
            is FAIL.
        :param max_std: The standard deviation of the frame luminance means must be less than this
        :param roi: If True, check brightness on an ROI of the frame
        :param display: When True the mean frame luminance is plotted against sample frames.
            The sample frames with the lowest and highest mean luminance are shown.
        """
        if self.data['frame_samples'] is None:
            return spec.QC.NOT_SET
        if roi is True:
            _, h, w = self.data['frame_samples'].shape
            if self.label == 'body':  # Latter half
                roi = (slice(None), slice(None), slice(int(w / 2), None, None))
            elif self.label == 'left':  # Top left quadrant (~2/3 width, 1/2 height)
                roi = (slice(None), slice(None, int(h / 2), None), slice(None, int(w * .66), None))
            else:  # Top right quadrant (~2/3 width, 1/2 height)
                roi = (slice(None), slice(None, int(h / 2), None), slice(int(w * .66), None, None))
        else:
            roi = (slice(None), slice(None), slice(None))
        brightness = self.data['frame_samples'][roi].mean(axis=(1, 2))

        if display:
            f = plt.figure()
            gs = f.add_gridspec(2, 3)
            indices = self.frame_samples_idx
            # Plot mean frame luminance
            ax = f.add_subplot(gs[:2, :2])
            plt.plot(indices, brightness, label='brightness')
            ax.set(
                xlabel='frame #',
                ylabel='brightness (mean pixel)',
                title='Brightness')
            ax.hlines(bounds, 0, indices[-1],
                      colors='tab:orange', linestyles=':', label='warning bounds')
            ax.hlines((bounds[0] / 2, bounds[1] * 2), 0, indices[-1],
                      colors='r', linestyles=':', label='failure bounds')
            ax.legend()
            # Plot min-max frames
            for i, idx in enumerate((np.argmax(brightness), np.argmin(brightness))):
                a = f.add_subplot(gs[i, 2])
                ax.annotate('*', (indices[idx], brightness[idx]),  # this is the point to label
                            textcoords='offset points', xytext=(0, 1), ha='center')
                frame = self.data['frame_samples'][idx][roi[1:]]
                title = ('min' if i else 'max') + ' mean luminance = %.2f' % brightness[idx]
                self.imshow(frame, ax=a, title=title)

        PCT_PASS = .75  # Proportion of sample frames that must pass
        # Warning if brightness not within range (3/4 of frames must be between bounds)
        warn_range = np.logical_and(brightness > bounds[0], brightness < bounds[1])
        warn_range = 'PASS' if sum(warn_range) / self.n_samples > PCT_PASS else 'WARNING'
        # Fail if brightness not within twice the range, or if the std exceeds the threshold
        fail_range = np.logical_and(brightness > bounds[0] / 2, brightness < bounds[1] * 2)
        within_range = sum(fail_range) / self.n_samples > PCT_PASS
        fail_range = 'PASS' if within_range and np.std(brightness) < max_std else 'FAIL'
        return self.overall_outcome([warn_range, fail_range])
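    # Worked sketch of the warn/fail logic with the default bounds (values hypothetical):
    # >>> brightness = np.array([50., 120., 190., 210.])  # mean luminance per sample frame
    # >>> np.logical_and(brightness > 40, brightness < 200).mean()
    # 0.75
    # Exactly 75% of frames fall within (40, 200), which is not > PCT_PASS, so the first
    # sub-check yields WARNING; all frames fall within the doubled bounds (20, 400), so the
    # FAIL sub-check passes provided the standard deviation is below max_std.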

    def check_file_headers(self):
        """Check that the video file's reported frame rate matches the expected frame rate."""
        if None in (self.data['video'], self.video_meta):
            return spec.QC.NOT_SET
        expected = self.video_meta[self.type][self.label]
        return spec.QC.PASS if self.data['video']['fps'] == expected['fps'] else spec.QC.FAIL

    def check_framerate(self, threshold=1.):
        """Check that the camera times match the specified frame rate for the camera.

        :param threshold: The maximum absolute difference between the timestamp sample rate and
            the video frame rate. NB: Does not take into account dropped frames.
        """
        if any(x is None for x in (self.data['timestamps'], self.video_meta)):
            return spec.QC.NOT_SET
        fps = self.video_meta[self.type][self.label]['fps']
        Fs = 1 / np.median(np.diff(self.data['timestamps']))  # Approx. frequency of camera
        return spec.QC.PASS if abs(Fs - fps) < threshold else spec.QC.FAIL, float(round(Fs, 3))
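    # Sketch of the rate estimate: the median inter-frame interval is inverted to give an
    # approximate sampling frequency (hypothetical 60 Hz timestamps):
    # >>> ts = np.arange(0, 10, 1 / 60)
    # >>> 1 / np.median(np.diff(ts))  # ~60.0, i.e. within `threshold` of the expected fps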

    def check_pin_state(self, display=False):
        """Check that the GPIO pin state reflects the Bpod TTLs."""
        if not data_for_keys(('video', 'pin_state', 'audio'), self.data):
            return spec.QC.NOT_SET
        size_diff = int(self.data['pin_state'].shape[0] - self.data['video']['length'])
        # NB: The pin state can be high for 2 consecutive frames
        low2high = np.insert(np.diff(self.data['pin_state'][:, -1].astype(int)) == 1, 0, False)
        # NB: Time between two consecutive TTLs can be sub-frame, so this will fail
        ndiff_low2high = int(self.data['audio'][::2].size - sum(low2high))
        # Check within ms of audio times
        if display:
            plt.Figure()
            plt.plot(self.data['timestamps'][low2high], np.zeros(sum(low2high)), 'o',
                     label='GPIO Low -> High')
            plt.plot(self.data['audio'], np.zeros(self.data['audio'].size), 'rx',
                     label='Audio TTL High')
            plt.xlabel('FPGA frame times / s')
            plt.gca().set(yticklabels=[])
            plt.gca().tick_params(left=False)
            plt.legend()

        outcome = self.overall_outcome(
            ('PASS' if size_diff == 0 else 'WARNING' if np.abs(size_diff) < 5 else 'FAIL',
             'PASS' if np.abs(ndiff_low2high) < 5 else 'WARNING')
        )
        return outcome, ndiff_low2high, size_diff
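    # Sketch of the low-to-high edge detection on a toy pin state vector:
    # >>> pin = np.array([0, 0, 1, 1, 0, 1])
    # >>> np.insert(np.diff(pin) == 1, 0, False)
    # array([False, False,  True, False, False,  True])
    # Each True marks a frame where the GPIO went low -> high, i.e. a recorded TTL onset.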

    def check_dropped_frames(self, threshold=.1):
        """Check how many frames were reported missing.

        :param threshold: The maximum allowable percentage of dropped frames
        """
        if not data_for_keys(('video', 'count'), self.data):
            return spec.QC.NOT_SET
        size_diff = int(self.data['count'].size - self.data['video']['length'])
        strict_increase = np.diff(self.data['count']) > 0
        if not np.all(strict_increase):
            n_affected = np.sum(np.invert(strict_increase))
            _log.info(f'frame count not strictly increasing: '
                      f'{n_affected} frames affected ({n_affected / strict_increase.size:.2%})')
            return spec.QC.CRITICAL
        dropped = np.diff(self.data['count']).astype(int) - 1
        pct_dropped = (sum(dropped) / len(dropped) * 100)
        # Calculate overall outcome for this check
        outcome = self.overall_outcome(
            ('PASS' if size_diff == 0 else 'WARNING' if np.abs(size_diff) < 5 else 'FAIL',
             'PASS' if pct_dropped < threshold else 'FAIL')
        )
        return outcome, int(sum(dropped)), size_diff
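    # Sketch of the dropped-frame count on a toy frame counter (frames 3 and 4 missing):
    # >>> count = np.array([0, 1, 2, 5, 6])
    # >>> int(sum(np.diff(count).astype(int) - 1))
    # 2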

    def check_timestamps(self):
        """Check that the camera.times array is reasonable."""
        if not data_for_keys(('timestamps', 'video'), self.data):
            return spec.QC.NOT_SET
        # Check number of timestamps matches video
        length_matches = self.data['timestamps'].size == self.data['video'].length
        # Check times are strictly increasing
        increasing = all(np.diff(self.data['timestamps']) > 0)
        # Check times do not contain nans
        nanless = not np.isnan(self.data['timestamps']).any()
        return spec.QC.PASS if increasing and length_matches and nanless else spec.QC.FAIL

    def check_camera_times(self):
        """Check that the number of raw camera timestamps matches the number of video frames."""
        if not data_for_keys(('bonsai_times', 'video'), self.data):
            return spec.QC.NOT_SET
        length_match = len(self.data['camera_times']) == self.data['video'].length
        outcome = spec.QC.PASS if length_match else spec.QC.WARNING
        return outcome, len(self.data['camera_times']) - self.data['video'].length

    def check_resolution(self):
        """Check that the video file resolution matches what we expect."""
        if self.data['video'] is None:
            return spec.QC.NOT_SET
        actual = self.data['video']
        expected = self.video_meta[self.type][self.label]
        match = actual['width'] == expected['width'] and actual['height'] == expected['height']
        return spec.QC.PASS if match else spec.QC.FAIL

    def check_wheel_alignment(self, tolerance=(1, 2), display=False):
        """Check that the wheel motion in the video correlates with the rotary encoder signal.

        This check is skipped for body camera videos as the wheel is often obstructed.

        Parameters
        ----------
        tolerance : int, (int, int)
            Maximum absolute offset in frames. If two values, the maximum value is taken as the
            warning threshold.
        display : bool
            If true, the wheel motion energy is plotted against the rotary encoder.

        Returns
        -------
        one.alf.spec.QC
            The outcome, one of {'NOT_SET', 'FAIL', 'WARNING', 'PASS'}.
        int
            Frame offset, i.e. by how many frames the video was shifted to match the rotary
            encoder signal. Negative values mean the video was shifted backwards with respect
            to the wheel timestamps.

        Notes
        -----
        - A negative frame offset typically means that there were frame TTLs at the beginning
          that do not correspond to any video frames (sometimes the first few frames aren't
          saved to disk). Since 2021-09-15 the extractor should compensate for this.
        """
        wheel_present = data_for_keys(('position', 'timestamps', 'period'), self.data['wheel'])
        if not wheel_present or self.label == 'body':
            return spec.QC.NOT_SET

        # Check the selected wheel movement period occurred within camera timestamp time
        camera_times = self.data['timestamps']
        in_range = within_ranges(camera_times, self.data['wheel']['period'].reshape(-1, 2))
        if not in_range.any():
            # Check if any camera timestamps overlap with the wheel times
            if np.any(np.logical_and(
                    camera_times > self.data['wheel']['timestamps'][0],
                    camera_times < self.data['wheel']['timestamps'][-1])
            ):
                _log.warning('Unable to check wheel alignment: '
                             'chosen movement is not during video')
                return spec.QC.NOT_SET
            else:
                # No overlap, return fail
                return spec.QC.FAIL
        aln = MotionAlignment(self.eid, self.one, self.log, session_path=self.session_path)
        aln.data = self.data.copy()
        aln.data['camera_times'] = {self.label: camera_times}
        aln.video_paths = {self.label: self.video_path}
        offset, *_ = aln.align_motion(period=self.data['wheel'].period,
                                      display=display, side=self.label)
        if offset is None:
            return spec.QC.NOT_SET
        if display:
            aln.plot_alignment()

        # Determine the outcome. If there are two values for the tolerance, one is taken to be
        # a warning threshold, the other a failure threshold.
        out_map = {0: spec.QC.WARNING, 1: spec.QC.WARNING, 2: spec.QC.PASS}  # 0: FAIL -> WARNING Aug 2022
        passed = np.abs(offset) <= np.sort(np.array(tolerance))
        return out_map[sum(passed)], int(offset)

    def check_position(self, hist_thresh=(75, 80), pos_thresh=(10, 15),
                       metric=cv2.TM_CCOEFF_NORMED,
                       display=False, test=False, roi=None, pct_thresh=True):
        """Check that the camera is positioned correctly.

        For the template matching, zero-normalized cross-correlation (default) should be more
        robust to exposure (which we're not checking here). The L2 norm (TM_SQDIFF) should
        also work.

        If display is True, the template ROI (pink hashed) is plotted over a video frame,
        along with the threshold regions (green solid). The histogram correlations are plotted
        and the full histogram is plotted for one of the sample frames and the reference frame.

        :param hist_thresh: The minimum histogram cross-correlation threshold to pass (0-1, or
            percent if pct_thresh is True).
        :param pos_thresh: The maximum number of pixels that the template matcher may be off
            by. If two values are provided, the lower threshold is treated as a warning boundary.
        :param metric: The metric to use for template matching.
        :param display: If true, the results are plotted
        :param test: If true, a reference frame is used instead of the frames in frame_samples.
        :param roi: A tuple of indices for the face template in the form ((y1, y2), (x1, x2))
        :param pct_thresh: If true, the thresholds are treated as percentages
        """
        if not test and self.data['frame_samples'] is None:
            return spec.QC.NOT_SET
        refs = self.load_reference_frames(self.label)
        # Ensure the thresholds are iterable and sorted
        pos_thresh = np.sort(np.array(pos_thresh))
        hist_thresh = np.sort(np.array(hist_thresh))

        # Method 1: histogram correlation against a reference frame
        ref_h = cv2.calcHist([refs[0]], [0], None, [256], [0, 256])
        frames = refs if test else self.data['frame_samples']
        hists = [cv2.calcHist([x], [0], None, [256], [0, 256]) for x in frames]
        corr = np.array([cv2.compareHist(test_h, ref_h, cv2.HISTCMP_CORREL) for test_h in hists])
        if pct_thresh:
            corr *= 100
        hist_passed = [np.all(corr > x) for x in hist_thresh]

        # Method 2: template matching of the face ROI
        top_left, roi, template = self.find_face(roi=roi, test=test, metric=metric, refs=refs)
        (y1, y2), (x1, x2) = roi
        err = (x1, y1) - np.median(np.array(top_left), axis=0)
        h, w = frames[0].shape[:2]

        if pct_thresh:  # Threshold as percent
            err_pct = [(abs(x) / y) * 100 for x, y in zip(err, (h, w))]
            face_passed = [all(err_pct < x) for x in pos_thresh]
        else:
            face_passed = [np.all(np.abs(err) < x) for x in pos_thresh]

        if display:
            plt.figure()
            # Plot frame with template overlay
            img = frames[0]
            ax0 = plt.subplot(221)
            ax0.imshow(img, cmap='gray', vmin=0, vmax=255)
            bounds = (x1 - err[0], x2 - err[0], y2 - err[1], y1 - err[1])
            ax0.imshow(template, cmap='gray', alpha=0.5, extent=bounds)
            if pct_thresh:
                for c, thresh in zip(('green', 'yellow'), pos_thresh):
                    t_y = (h / 100) * thresh
                    t_x = (w / 100) * thresh
                    xy = (x1 - t_x, y1 - t_y)
                    ax0.add_patch(Rectangle(xy, x2 - x1 + (t_x * 2), y2 - y1 + (t_y * 2),
                                            fill=True, facecolor=c, lw=0, alpha=0.05))
            else:
                for c, thresh in zip(('green', 'yellow'), pos_thresh):
                    xy = (x1 - thresh, y1 - thresh)
                    ax0.add_patch(Rectangle(xy, x2 - x1 + (thresh * 2), y2 - y1 + (thresh * 2),
                                            fill=True, facecolor=c, lw=0, alpha=0.05))
            xy = (x1 - err[0], y1 - err[1])
            ax0.add_patch(Rectangle(xy, x2 - x1, y2 - y1,
                                    edgecolor='pink', fill=False, hatch='//', lw=1))
            ax0.set(xlim=(0, img.shape[1]), ylim=(img.shape[0], 0))
            ax0.set_axis_off()
            # Plot the image histograms
            ax1 = plt.subplot(212)
            ax1.plot(ref_h[5:-1], label='reference frame')
            ax1.plot(np.array(hists).mean(axis=0)[5:-1], label='mean frame')
            ax1.set_xlim([0, 256])
            plt.legend()
            # Plot the correlations for each sample frame
            ax2 = plt.subplot(222)
            ax2.plot(corr, label='hist correlation')
            ax2.axhline(hist_thresh[0], 0, self.n_samples,
                        linestyle=':', color='r', label='fail threshold')
            ax2.axhline(hist_thresh[1], 0, self.n_samples,
                        linestyle=':', color='g', label='pass threshold')
            ax2.set(xlabel='Sample Frame #', ylabel='Hist correlation')
            plt.legend()
            plt.suptitle('Check position')
            plt.show()

        pass_map = {i: s for i, s in enumerate(('FAIL', 'WARNING', 'PASS'))}
        face_aligned = pass_map[sum(face_passed)]
        hist_correlates = pass_map[sum(hist_passed)]

        return self.overall_outcome([face_aligned, hist_correlates], agg=min)
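    # Sketch of the histogram correlation used in method 1; `frame` and `ref` stand in for
    # hypothetical uint8 greyscale arrays. HISTCMP_CORREL returns 1.0 for identical
    # histograms and falls towards 0 as the luminance distributions diverge:
    # >>> h_test = cv2.calcHist([frame], [0], None, [256], [0, 256])
    # >>> h_ref = cv2.calcHist([ref], [0], None, [256], [0, 256])
    # >>> cv2.compareHist(h_test, h_ref, cv2.HISTCMP_CORREL)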

    def check_focus(self, n=20, threshold=(100, 6),
                    roi=False, display=False, test=False, equalize=True):
        """Check that the video is in focus.

        Two methods are used here: looking at the high frequencies with a DFT, and applying a
        Laplacian HPF and looking at the variance.

        Note:
        - Both methods are sensitive to noise (the Laplacian is a 2nd order filter).
        - The thresholds for the fft may need to be different for the left/right vs body as
          the distribution of frequencies in the image is different (e.g. the holder
          comprises mostly very high frequencies).
        - The image may be overall in focus but the places we care about can still be out of
          focus (namely the face). For this we'll take an ROI around the face.
        - The focus check is thrown off by brightness. This may be fixed by equalizing the
          histogram (set equalize=True).

        Parameters
        ----------
        n : int
            Number of frames from frame_samples data to use in check.
        threshold : tuple of float
            The lower boundary for Laplacian variance and mean FFT filtered brightness,
            respectively.
        roi : bool, None, list of slice
            If False, the roi is determined via template matching for the face or body.
            If None, some set ROIs for face and paws are used. A list of slices may also be
            passed.
        display : bool
            If true, the results are displayed.
        test : bool
            If true, a set of artificially blurred reference frames are used as the input.
            This can be used to select reasonable thresholds.
        equalize : bool
            If true, the histograms of the frames are equalized, resulting in increased global
            contrast and a linear CDF. This makes the check robust to low light conditions.

        Returns
        -------
        one.alf.spec.QC
            The QC outcome, either FAIL or PASS.
        """
        no_frames = self.data['frame_samples'] is None or len(self.data['frame_samples']) == 0
        if not test and no_frames:
            return spec.QC.NOT_SET

        if roi is False:
            top_left, roi, _ = self.find_face(test=test)  # (y1, y2), (x1, x2)
            h, w = map(lambda x: np.diff(x).item(), roi)
            x, y = np.median(np.array(top_left), axis=0).round().astype(int)
            roi = (np.s_[y: y + h, x: x + w],)
        else:
            ROI = {
                'left': (np.s_[:400, :561], np.s_[500:, 100:800]),  # (face, wheel)
                'right': (np.s_[:196, 397:], np.s_[221:, 255:]),
                'body': (np.s_[143:274, 84:433],)  # body holder
            }
            roi = roi or ROI[self.label]

        if test:
            """In test mode load a reference frame and run it through a normalized box filter
            with increasing kernel size.
            """
            idx = (0,)
            ref = self.load_reference_frames(self.label)[idx]
            kernel_sz = np.unique(np.linspace(0, 15, n, dtype=int))
            n = kernel_sz.size  # Size excluding repeated kernels
            img = np.empty((n, *ref.shape), dtype=np.uint8)
            for i, k in enumerate(kernel_sz):
                img[i] = ref.copy() if k == 0 else cv2.blur(ref, (k, k))
            if equalize:
                [cv2.equalizeHist(x, x) for x in img]
            if display:
                # Plot blurred images
                f, axes = plt.subplots(1, len(kernel_sz))
                for ax, ig, k in zip(axes, img, kernel_sz):
                    self.imshow(ig, ax=ax, title='Kernel ({0}, {0})'.format(k or 'None'))
                f.suptitle('Reference frame with box filter')
        else:
            # Sub-sample the frame samples
            idx = np.unique(np.linspace(0, len(self.data['frame_samples']) - 1, n, dtype=int))
            img = self.data['frame_samples'][idx]
            if equalize:
                [cv2.equalizeHist(x, x) for x in img]

        # A measure of the sharpness, effectively taking the second derivative of the image
        lpc_var = np.empty((min(n, len(img)), len(roi)))
        for i, frame in enumerate(img[::-1]):
            lpc = cv2.Laplacian(frame, cv2.CV_16S, ksize=1)
            lpc_var[i] = [lpc[mask].var() for mask in roi]

        if display:
            # Plot the first sample image
            f = plt.figure()
            gs = f.add_gridspec(len(roi) + 1, 3)
            f.add_subplot(gs[0:len(roi), 0])
            frame = img[0]
            self.imshow(frame, title=f'Frame #{self.frame_samples_idx[idx[0]]}')
            # Plot the ROIs with and without filter
            lpc = cv2.Laplacian(frame, cv2.CV_16S, ksize=1)
            abs_lpc = cv2.convertScaleAbs(lpc)
            for i, r in enumerate(roi):
                f.add_subplot(gs[i, 1])
                self.imshow(frame[r], title=f'ROI #{i + 1}')
                f.add_subplot(gs[i, 2])
                self.imshow(abs_lpc[r], title=f'ROI #{i + 1} - Laplacian filter')
            f.suptitle('Laplacian blur detection')
            # Plot variance over frames
            ax = f.add_subplot(gs[len(roi), :])
            ln = plt.plot(lpc_var)
            [l.set_label(f'ROI #{i + 1}') for i, l in enumerate(ln)]
            ax.axhline(threshold[0], 0, n, linestyle=':', color='r', label='lower threshold')
            ax.set(xlabel='Frame sample', ylabel='Variance of the Laplacian')
            plt.tight_layout()
            plt.legend()

        # Second test is to highpass with dft
        h, w = img.shape[1:]
        cX, cY = w // 2, h // 2
        sz = 60  # Seems to be the magic number for high pass
        mask = np.ones((h, w, 2), bool)
        mask[cY - sz:cY + sz, cX - sz:cX + sz] = False
        filt_mean = np.empty(len(img))
        for i, frame in enumerate(img[::-1]):
            dft = cv2.dft(np.float32(frame), flags=cv2.DFT_COMPLEX_OUTPUT)
            f_shift = np.fft.fftshift(dft) * mask  # Shift & remove low frequencies
            f_ishift = np.fft.ifftshift(f_shift)  # Shift back
            filt_frame = cv2.idft(f_ishift)  # Reconstruct
            filt_frame = cv2.magnitude(filt_frame[..., 0], filt_frame[..., 1])
            # Re-normalize to 8-bits to make the threshold simpler
            img_back = cv2.normalize(filt_frame, None, alpha=0, beta=256,
                                     norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
            filt_mean[i] = np.mean(img_back)
            if i == len(img) - 1 and display:
                # Plot Fourier transforms
                f = plt.figure()
                gs = f.add_gridspec(2, 3)
                self.imshow(img[0], ax=f.add_subplot(gs[0, 0]), title='Original frame')
                dft_shift = np.fft.fftshift(dft)
                magnitude = 20 * np.log(cv2.magnitude(dft_shift[..., 0], dft_shift[..., 1]))
                self.imshow(magnitude, ax=f.add_subplot(gs[0, 1]), title='Magnitude spectrum')
                self.imshow(img_back, ax=f.add_subplot(gs[0, 2]), title='Filtered frame')
                ax = f.add_subplot(gs[1, :])
                ax.plot(filt_mean)
                ax.axhline(threshold[1], 0, n, linestyle=':', color='r', label='lower threshold')
                ax.set(xlabel='Frame sample', ylabel='Mean of filtered frame')
                f.suptitle('Discrete Fourier Transform')
                plt.show()
        passes = np.all(lpc_var > threshold[0]) or np.all(filt_mean > threshold[1])
        return spec.QC.PASS if passes else spec.QC.FAIL
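    # Sketch of the Laplacian sharpness measure: blurring removes the high frequencies the
    # second-order filter responds to, collapsing the variance (`frame` is a hypothetical
    # uint8 greyscale array):
    # >>> sharp = cv2.Laplacian(frame, cv2.CV_16S, ksize=1).var()
    # >>> blurred = cv2.Laplacian(cv2.blur(frame, (15, 15)), cv2.CV_16S, ksize=1).var()
    # >>> sharp > blurred  # typically True for an in-focus frame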

    def find_face(self, roi=None, test=False, metric=cv2.TM_CCOEFF_NORMED, refs=None):
        """Use template matching to find the face location in the frame.

        For the template matching, zero-normalized cross-correlation (default) should be more
        robust to exposure (which we're not checking here). The L2 norm (TM_SQDIFF) should
        also work. That said, normalizing the histograms works best.

        :param roi: A tuple of indices for the face template in the form ((y1, y2), (x1, x2))
        :param test: If True, the template is matched against frames that come from the same
            session
        :param metric: The metric to use for template matching
        :param refs: An array of frames to match the template to

        :returns: a list of (x1, y1) match locations, the ROI used, and the template
        """
        ROI = {
            'left': ((45, 346), (138, 501)),
            'right': ((14, 174), (430, 618)),
            'body': ((141, 272), (90, 339))
        }
        roi = roi or ROI[self.label]
        refs = self.load_reference_frames(self.label) if refs is None else refs

        frames = refs if test else self.data['frame_samples']
        template = refs[0][tuple(slice(*r) for r in roi)]
        top_left = []  # [(x1, y1), ...]
        for frame in frames:
            res = cv2.matchTemplate(frame, template, metric)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum
            top_left.append(min_loc if metric < 2 else max_loc)
            # bottom_right = (top_left[0] + w, top_left[1] + h)
        return top_left, roi, template
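    # Sketch of a single template-matching step as used above; `frame` and `template` are
    # hypothetical uint8 arrays. For TM_CCOEFF_NORMED the best match is the maximum:
    # >>> res = cv2.matchTemplate(frame, template, cv2.TM_CCOEFF_NORMED)
    # >>> _, _, _, max_loc = cv2.minMaxLoc(res)  # (x, y) of the matched top-left corner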

    @staticmethod
    def load_reference_frames(side):
        """Load some reference frames for a given video.

        The reference frames are from sessions where the camera was well positioned. The
        frames are in qc/reference, one file per camera, only one channel per frame. The
        session eids can be found in qc/reference/frame_src.json.

        :param side: Video label, e.g. 'left'
        :return: numpy array of frames with the shape (n, h, w)
        """
        file = next(Path(__file__).parent.joinpath('reference').glob(f'frames_{side}.npy'))
        refs = np.load(file)
        return refs

    @staticmethod
    def imshow(frame, ax=None, title=None, **kwargs):
        """plt.imshow with some convenient defaults for greyscale frames."""
        h = ax or plt.gca()
        defaults = {
            'cmap': kwargs.pop('cmap', 'gray'),
            'vmin': kwargs.pop('vmin', 0),
            'vmax': kwargs.pop('vmax', 255)
        }
        h.imshow(frame, **defaults, **kwargs)
        h.set(title=title)
        h.set_axis_off()
        return h


class CameraQCCamlog(CameraQC):
    """
    A class for computing camera QC metrics from camlog data.

    For this QC we expect check_pin_state to be NOT_SET as we are not using the GPIO for
    timestamp alignment.
    """

    dstypes = [
        '_iblrig_taskData.raw',
        '_iblrig_taskSettings.raw',
        '_iblrig_Camera.raw',
        'camera.times',
        'wheel.position',
        'wheel.timestamps'
    ]
    dstypes_fpga = [
        '_spikeglx_sync.channels',
        '_spikeglx_sync.polarities',
        '_spikeglx_sync.times',
        'DAQData.raw.meta',
        'DAQData.wiring'
    ]

    def __init__(self, session_path_or_eid, camera, sync_collection='raw_sync_data', sync_type='nidq', **kwargs):
        """Compute camera QC metrics from camlog data.

        For this QC we expect check_pin_state to be NOT_SET as we are not using the GPIO for
        timestamp alignment.
        """
        super().__init__(session_path_or_eid, camera, sync_collection=sync_collection, sync_type=sync_type, **kwargs)
        self._type = 'ephys'
        self.checks_to_remove = ['check_pin_state']

    def load_data(self, download_data: bool = None,
                  extract_times: bool = False, load_video: bool = True, **kwargs) -> None:
        """Extract the data from raw data files.

        Extracts all the required task data from the raw data files.

        Data keys:
            - count (int array): the sequential frame number (n, n+1, n+2...)
            - pin_state (): the camera GPIO pin; records the audio TTLs; should be one per frame
            - audio (float array): timestamps of audio TTL fronts
            - fpga_times (float array): timestamps of camera TTLs recorded by FPGA
            - timestamps (float array): extracted video timestamps (the camera.times ALF)
            - bonsai_times (datetime array): system timestamps of video PC; should be one per frame
            - camera_times (float array): camera frame timestamps extracted from frame headers
            - wheel (Bunch): rotary encoder timestamps, position and period used for wheel motion
            - video (Bunch): video meta data, including dimensions and FPS
            - frame_samples (h x w x n array): array of evenly sampled frames (1 colour channel)

        :param download_data: if True, any missing raw data is downloaded via ONE.
            Missing data will raise an AssertionError
        :param extract_times: if True, the camera.times are re-extracted from the raw data
        :param load_video: if True, calls the load_video_data method
        """
        assert self.session_path, 'no session path set'
        if download_data is not None:
            self.download_data = download_data
        if self.download_data and self.eid and self.one and not self.one.offline:
            self.ensure_required_data()
        _log.info('Gathering data for QC')

        # If there is an experiment description and there are video parameters
        sess_params = read_params(self.session_path) or {}
        video_collection = get_video_collection(sess_params, self.label)
        task_collection = get_task_collection(sess_params)
        self._set_sync(sess_params)
        self._update_meta_from_session_params(sess_params)

        # Get frame count
        log, _ = parse_cam_log(self.session_path.joinpath(video_collection, f'_iblrig_{self.label}Camera.raw.camlog'))
        self.data['count'] = log.frame_id.values

        # Load the audio and raw FPGA times
        if self.sync != 'bpod' and self.sync is not None:
            sync, chmap = ephys_fpga.get_sync_and_chn_map(self.session_path, self.sync_collection)
            audio_ttls = ephys_fpga.get_sync_fronts(sync, chmap['audio'])
            self.data['audio'] = audio_ttls['times']  # Get rises
            # Load raw FPGA times
            cam_ts = extract_camera_sync(sync, chmap)
            self.data['fpga_times'] = cam_ts[self.label]
        else:
            bpod_data = raw.load_data(self.session_path, task_collection=task_collection)
            _, audio_ttls = raw.load_bpod_fronts(self.session_path, data=bpod_data, task_collection=task_collection)
            self.data['audio'] = audio_ttls['times']

        # Load extracted frame times
        alf_path = self.session_path / 'alf'
        try:
            assert not extract_times
            cam_path = next(alf_path.rglob(f'*{self.label}Camera.times*')).parent
            self.data['timestamps'] = alfio.load_object(
                cam_path, f'{self.label}Camera', short_keys=True)['times']
        except AssertionError:  # Re-extract
            kwargs = dict(video_path=self.video_path, labels=self.label)
            if self.sync == 'bpod':
                kwargs = {**kwargs, 'task_collection': task_collection}
            else:
                kwargs = {**kwargs, 'sync': sync, 'chmap': chmap}  # noqa
            outputs, _ = extract_all(self.session_path, self.sync, save=False, camlog=True, **kwargs)
            self.data['timestamps'] = outputs[f'{self.label}_camera_timestamps']
        except ALFObjectNotFound:
            _log.warning('no camera.times ALF found for session')

        # Get audio and wheel data
        wheel_keys = ('timestamps', 'position')
        try:
            # glob in case wheel data are in sub-collections
            wheel_path = next(alf_path.rglob('*wheel.timestamps*')).parent
            self.data['wheel'] = alfio.load_object(wheel_path, 'wheel', short_keys=True)
        except ALFObjectNotFound:
            # Extract from raw data
            if self.sync != 'bpod':
                wheel_data = ephys_fpga.extract_wheel_sync(sync, chmap)
            else:
                wheel_data = training_wheel.get_wheel_position(self.session_path, task_collection=task_collection)
            self.data['wheel'] = Bunch(zip(wheel_keys, wheel_data))

        # Find a short period of wheel motion for motion correlation.
        if data_for_keys(wheel_keys, self.data['wheel']) and self.data['timestamps'] is not None:
            self.data['wheel'].period = self.get_active_wheel_period(self.data['wheel'])

        # Load in camera times
        self.data['camera_times'] = log.timestamp.values

        # Gather information from video file
        if load_video:
            _log.info('Inspecting video file...')
            self.load_video_data()

    def ensure_required_data(self):
        """Ensure the datasets required for QC are local.

        If the download_data attribute is True, any missing data are downloaded. If all the data
        are not present locally at the end of it an exception is raised. If the stream attribute
        is True, the video file is not required to be local, however it must be remotely
        accessible.
        NB: Requires a valid instance of ONE and a valid session eid.
        """
        assert self.one is not None, 'ONE required to download data'

        sess_params = {}
        if self.download_data:
            dset = self.one.list_datasets(self.session_path, '*experiment.description*', details=True)
            if self.one._check_filesystem(dset):
                sess_params = read_params(self.session_path) or {}
        else:
            sess_params = read_params(self.session_path) or {}
        self._set_sync(sess_params)

        # Dataset collections outside this list are ignored (e.g. probe00, raw_passive_data)
        collections = (
            'alf', self.sync_collection, get_task_collection(sess_params),
            get_video_collection(sess_params, self.label))

        # Get extractor type
        dtypes = self.dstypes + self.dstypes_fpga
        assert_unique = True

        for dstype in dtypes:
            datasets = self.one.type2datasets(self.eid, dstype, details=True)
            if 'camera' in dstype.lower():  # Download individual camera file
                datasets = filter_datasets(datasets, filename=f'.*{self.label}.*')
            else:  # Ignore probe datasets, etc.
                datasets = filter_datasets(datasets, collection=collections,
                                           assert_unique=assert_unique)
            if any(x.endswith('.mp4') for x in datasets.rel_path) and self.stream:
                names = [x.split('/')[-1] for x in self.one.list_datasets(self.eid, details=False)]
                assert f'_iblrig_{self.label}Camera.raw.mp4' in names, 'No remote video file found'
                continue
            optional = ('camera.times', '_iblrig_Camera.raw', 'wheel.position', 'wheel.timestamps')
            present = (
                self.one._check_filesystem(datasets)
                if self.download_data
                else (next(self.session_path.rglob(d), None) for d in datasets['rel_path'])
            )

            required = (dstype not in optional)
            all_present = not datasets.empty and all(present)
            assert all_present or not required, f'Dataset {dstype} not found'

    def check_camera_times(self):
        """Check that the number of raw camera timestamps matches the number of video frames."""
        if not data_for_keys(('camera_times', 'video'), self.data):
            return spec.QC.NOT_SET
        length_match = len(self.data['camera_times']) == self.data['video'].length
        outcome = spec.QC.PASS if length_match else spec.QC.WARNING
        return outcome, len(self.data['camera_times']) - self.data['video'].length


def data_for_keys(keys, data):
    """Check that keys exist in the 'data' dict and contain values other than None."""
    return data is not None and all(k in data and data.get(k, None) is not None for k in keys)

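# Doctest-style sketch of the helper's behaviour:
# >>> data_for_keys(('a', 'b'), {'a': 1, 'b': 2})
# True
# >>> data_for_keys(('a', 'b'), {'a': 1, 'b': None})
# False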

def get_task_collection(sess_params):
    """
    Return the first non-passive task collection.

    Returns the first task collection from the experiment description whose task name does not
    contain 'passive', otherwise returns 'raw_behavior_data'.

    Parameters
    ----------
    sess_params : dict
        The loaded experiment description file.

    Returns
    -------
    str:
        The collection presumed to contain wheel data.
    """
    sess_params = sess_params or {}
    tasks = (chain(*map(dict.items, sess_params.get('tasks', []))))
    return next((v['collection'] for k, v in tasks if 'passive' not in k), 'raw_behavior_data')

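# Doctest-style sketch with a minimal, hypothetical experiment description:
# >>> params = {'tasks': [{'passiveChoiceWorld': {'collection': 'raw_passive_data'}},
# ...                     {'ephysChoiceWorld': {'collection': 'raw_task_data_00'}}]}
# >>> get_task_collection(params)
# 'raw_task_data_00'
# >>> get_task_collection(None)
# 'raw_behavior_data'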

def get_video_collection(sess_params, label):
    """
    Return the collection containing the raw video data for a given camera.

    Parameters
    ----------
    sess_params : dict
        The loaded experiment description file.
    label : str
        The camera label.

    Returns
    -------
    str:
        The collection presumed to contain the video data.
    """
    DEFAULT = 'raw_video_data'
    value = sess_params or {}
    for key in ('devices', 'cameras', label, 'collection'):
        value = value.get(key)
        if not value:
            return DEFAULT
    return value

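# Doctest-style sketch (hypothetical description file contents):
# >>> params = {'devices': {'cameras': {'left': {'collection': 'raw_video_data_00'}}}}
# >>> get_video_collection(params, 'left')
# 'raw_video_data_00'
# >>> get_video_collection({}, 'left')  # missing keys fall back to the default
# 'raw_video_data'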

def run_all_qc(session, cameras=('left', 'right', 'body'), **kwargs):
    """Run QC for all cameras.

    Run the camera QC for left, right and body cameras.

    :param session: A session path or eid.
    :param update: If True, QC fields are updated on Alyx.
    :param cameras: A list of camera names to perform QC on.
    :param stream: If true and local video files are not available, the data are streamed from
        the remote source.
    :return: dict of CameraQC objects
    """
    qc = {}
    camlog = kwargs.pop('camlog', False)
    CamQC = CameraQCCamlog if camlog else CameraQC

    run_args = {k: kwargs.pop(k) for k in ('download_data', 'extract_times', 'update')
                if k in kwargs.keys()}
    for camera in cameras:
        qc[camera] = CamQC(session, camera, **kwargs)
        qc[camera].run(**run_args)
    return qc