Coverage for ibllib/qc/camera.py: 82%
591 statements
coverage.py v7.10.6, created at 2025-09-01 13:38 +0100
1"""Video quality control.
3This module runs a list of quality control metrics on the camera and extracted video data.
5Examples
6--------
7Run right camera QC, downloading all but video file
9>>> qc = CameraQC(eid, 'right', download_data=True, stream=True)
10>>> qc.run()
12Run left camera QC with session path, update QC field in Alyx
14>>> qc = CameraQC(session_path, 'left')
15>>> outcome, extended = qc.run(update=True) # Returns outcome of videoQC only
16>>> print(f'video QC = {outcome}; overall session QC = {qc.outcome}') # NB difference outcomes
18Run only video QC (no timestamp/alignment checks) on 20 frames for the body camera
20>>> qc = CameraQC(eid, 'body', n_samples=20)
21>>> qc.load_video_data() # Quicker than loading all data
22>>> qc.run()
24Run specific video QC check and display the plots
26>>> qc = CameraQC(eid, 'left')
27>>> qc.load_data(download_data=True)
28>>> qc.check_position(display=True) # NB: Not all checks make plots
30Run the QC for all cameras
32>>> qcs = run_all_qc(eid)
33>>> qcs['left'].metrics # Dict of checks and outcomes for left camera
34"""
import logging
from inspect import getmembers, isfunction
from pathlib import Path
from itertools import chain
import copy

import cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle

import one.alf.io as alfio
from one.alf import spec
from one.alf.exceptions import ALFObjectNotFound
from iblutil.util import Bunch
from iblutil.numerical import within_ranges

import ibllib.io.extractors.camera as camio
from ibllib.io.extractors import ephys_fpga, training_wheel, mesoscope
from ibllib.io.extractors.video_motion import MotionAlignment
from ibllib.io import raw_data_loaders as raw
from ibllib.io.raw_daq_loaders import load_timeline_sync_and_chmap
from ibllib.io.session_params import read_params, get_sync, get_sync_namespace
import brainbox.behavior.wheel as wh
from ibllib.io.video import get_video_meta, get_video_frames_preload, assert_valid_label
from . import base

_log = logging.getLogger(__name__)

try:
    from labcams import parse_cam_log
except ImportError:
    _log.warning('labcams not installed')


class CameraQC(base.QC):
    """A class for computing camera QC metrics."""

    dstypes = [
        '_ibl_experiment.description',
        '_iblrig_Camera.frameData',  # Replaces the next 3 datasets
        '_iblrig_Camera.frame_counter',
        '_iblrig_Camera.GPIO',
        '_iblrig_Camera.timestamps',
        '_iblrig_taskData.raw',
        '_iblrig_taskSettings.raw',
        '_iblrig_Camera.raw',
        'camera.times',
        'wheel.position',
        'wheel.timestamps'
    ]
    dstypes_fpga = [
        '_spikeglx_sync.channels',
        '_spikeglx_sync.polarities',
        '_spikeglx_sync.times',
        'ephysData.raw.meta'
    ]
    """Recall that for the training rig there is only one side camera at 30 Hz and 1280 x 1024 px.
    For the recording rig there are two side cameras (left: 60 Hz, 1280 x 1024 px;
    right: 150 Hz, 640 x 512 px) and one body camera (30 Hz, 640 x 512 px)."""
    video_meta = {
        'training': {
            'left': {
                'fps': 30,
                'width': 1280,
                'height': 1024
            }
        },
        'ephys': {
            'left': {
                'fps': 60,
                'width': 1280,
                'height': 1024
            },
            'right': {
                'fps': 150,
                'width': 640,
                'height': 512
            },
            'body': {
                'fps': 30,
                'width': 640,
                'height': 512
            },
        }
    }

    def __init__(self, session_path_or_eid, camera, **kwargs):
        """
        Compute and view camera QC metrics.

        :param session_path_or_eid: A session id or path
        :param camera: The camera to run QC on, if None QC is run for all three cameras
        :param n_samples: The number of frames to sample for the position and brightness QC
        :param stream: If true and local video files not available, the data are streamed from
        the remote source.
        :param log: A logging.Logger instance, if None the 'ibllib' logger is used
        :param one: An ONE instance for fetching and setting the QC on Alyx
        """
        self.stream = kwargs.pop('stream', None)
        self.n_samples = kwargs.pop('n_samples', 100)
        self.sync_collection = kwargs.pop('sync_collection', None)
        self.sync = kwargs.pop('sync_type', None)
        self.protocol = kwargs.pop('protocol', None)
        super().__init__(session_path_or_eid, **kwargs)

        # Data
        self.label = assert_valid_label(camera)
        filename = f'_iblrig_{self.label}Camera.raw*.mp4'
        raw_video_path = self.session_path.joinpath('raw_video_data')
        self.video_path = next(raw_video_path.glob(filename), None)

        # If local video doesn't exist, change video path to URL
        if not self.video_path and self.stream is not False and self.one is not None:
            try:
                self.stream = True
                self.video_path = self.one.path2url(raw_video_path / filename.replace('*', ''))
            except (StopIteration, ALFObjectNotFound):
                _log.error('No remote or local video file found')
                self.video_path = None

        logging.disable(logging.NOTSET)
        keys = ('count', 'pin_state', 'audio', 'fpga_times', 'wheel', 'video',
                'frame_samples', 'timestamps', 'camera_times', 'bonsai_times')
        self.data = Bunch.fromkeys(keys)
        self.frame_samples_idx = None

        # QC outcomes map
        self.metrics = None
        self.outcome = spec.QC.NOT_SET

        # Specify any checks to remove
        if self.protocol is not None and 'habituation' in self.protocol:
            self.checks_to_remove = ['check_wheel_alignment']
        else:
            self.checks_to_remove = []
        self._type = None

    @property
    def type(self):
        """
        Returns the camera type based on the protocol.

        :return: Returns either None, 'ephys' or 'training'
        """
        if not self._type:
            return
        else:
            return 'ephys' if 'ephys' in self._type else 'training'

    def load_data(self, extract_times: bool = False, load_video: bool = True) -> None:
        """Extract the data from raw data files.

        Extracts all the required task data from the raw data files. Missing data will raise an
        AssertionError. TODO This method could instead be moved to CameraExtractor classes.

        Data keys:
            - count (int array): the sequential frame number (n, n+1, n+2...)
            - pin_state (array): the camera GPIO pin; records the audio TTLs; should be one per frame
            - audio (float array): timestamps of audio TTL fronts
            - fpga_times (float array): timestamps of camera TTLs recorded by FPGA
            - timestamps (float array): extracted video timestamps (the camera.times ALF)
            - bonsai_times (datetime array): system timestamps of video PC; should be one per frame
            - camera_times (float array): camera frame timestamps extracted from frame headers
            - wheel (Bunch): rotary encoder timestamps, position and period used for wheel motion
            - video (Bunch): video meta data, including dimensions and FPS
            - frame_samples (h x w x n array): array of evenly sampled frames (1 colour channel)

        :param extract_times: if True, the camera.times are re-extracted from the raw data
        :param load_video: if True, calls the load_video_data method
        """
        assert self.session_path, 'no session path set'
        _log.info('Gathering data for QC')

        # Get frame count and pin state
        self.data['count'], self.data['pin_state'] = \
            raw.load_embedded_frame_data(self.session_path, self.label, raw=True)

        # If there is an experiment description and there are video parameters
        sess_params = read_params(self.session_path) or {}
        task_collection = get_task_collection(sess_params)
        ns = get_sync_namespace(sess_params)
        self._set_sync(sess_params)
        self._update_meta_from_session_params(sess_params)

        # Load the audio and raw FPGA times
        sync = chmap = None
        if self.sync != 'bpod' and self.sync is not None:
            self.sync_collection = self.sync_collection or 'raw_ephys_data'
            ns = ns or 'spikeglx'
            if ns == 'spikeglx':
                sync, chmap = ephys_fpga.get_sync_and_chn_map(self.session_path, self.sync_collection)
            elif ns == 'timeline':
                sync, chmap = load_timeline_sync_and_chmap(self.session_path / self.sync_collection)
            else:
                raise NotImplementedError(f'Unknown namespace "{ns}"')
            audio_ttls = ephys_fpga.get_sync_fronts(sync, chmap['audio'])
            self.data['audio'] = audio_ttls['times']  # Get rises
            # Load raw FPGA times
            cam_ts = camio.extract_camera_sync(sync, chmap)
            self.data['fpga_times'] = cam_ts[self.label]
        else:
            self.sync_collection = self.sync_collection or task_collection
            bpod_data = raw.load_data(self.session_path, task_collection)
            _, audio_ttls = raw.load_bpod_fronts(
                self.session_path, data=bpod_data, task_collection=task_collection)
            self.data['audio'] = audio_ttls['times']

        # Load extracted frame times
        alf_path = self.session_path / 'alf'
        try:
            assert not extract_times
            self.data['timestamps'] = alfio.load_object(
                alf_path, f'{self.label}Camera', short_keys=True)['times']
        except AssertionError:  # Re-extract
            kwargs = dict(video_path=self.video_path, save=False)
            if self.sync == 'bpod':
                extractor = camio.CameraTimestampsBpod(self.session_path)
                kwargs['task_collection'] = task_collection
            else:
                kwargs.update(sync=sync, chmap=chmap)
                extractor = camio.CameraTimestampsFPGA(self.label, self.session_path)
            self.data['timestamps'] = extractor.extract(**kwargs)[0]
        except ALFObjectNotFound:
            _log.warning('no camera.times ALF found for session')

        # Get audio and wheel data
        wheel_keys = ('timestamps', 'position')
        try:
            # glob in case wheel data are in sub-collections
            alf_path = next(alf_path.rglob('*wheel.timestamps*')).parent
            self.data['wheel'] = alfio.load_object(alf_path, 'wheel', short_keys=True)
        except (StopIteration, ALFObjectNotFound):
            # Extract from raw data
            if self.sync != 'bpod' and self.sync is not None:
                if ns == 'spikeglx':
                    wheel_data = ephys_fpga.extract_wheel_sync(sync, chmap)
                elif ns == 'timeline':
                    extractor = mesoscope.TimelineTrials(self.session_path, sync_collection=self.sync_collection)
                    wheel_data = extractor.extract_wheel_sync()
                else:
                    raise NotImplementedError(f'Unknown namespace "{ns}"')
            else:
                if self.protocol is not None and 'habituation' in self.protocol:
                    wheel_data = training_wheel.get_wheel_position(
                        self.session_path, task_collection=task_collection)
                else:
                    wheel_data = [None, None]
            self.data['wheel'] = Bunch(zip(wheel_keys, wheel_data))

        # Find short period of wheel motion for motion correlation.
        if data_for_keys(wheel_keys, self.data['wheel']) and self.data['timestamps'] is not None:
            self.data['wheel'].period = self.get_active_wheel_period(self.data['wheel'])

        # Load Bonsai frame timestamps
        try:
            ssv_times = raw.load_camera_ssv_times(self.session_path, self.label)
            self.data['bonsai_times'], self.data['camera_times'] = ssv_times
        except AssertionError:
            _log.warning('No Bonsai video timestamps file found')

        # Gather information from video file
        if load_video:
            _log.info('Inspecting video file...')
            self.load_video_data()

    def load_video_data(self):
        """Get basic properties of video.

        Updates the `data` property with video metadata such as length and frame count, as well as
        loading some frames to perform QC on.
        """
        try:
            self.data['video'] = get_video_meta(self.video_path, one=self.one)
            # Sample some frames from the video file
            indices = np.linspace(100, self.data['video'].length - 100, self.n_samples).astype(int)
            self.frame_samples_idx = indices
            self.data['frame_samples'] = get_video_frames_preload(self.video_path, indices,
                                                                  mask=np.s_[:, :, 0])
        except AssertionError:
            _log.error('Failed to read video file; setting outcome to CRITICAL')
            self._outcome = spec.QC.CRITICAL

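    # Worked illustration of the sampling above (numbers derived from the code, not from the
    # module's docs): a 1000-frame video with n_samples=10 yields
    # np.linspace(100, 900, 10).astype(int) -> [100, 188, 277, 366, 455, 544, 633, 722, 811, 900],
    # i.e. evenly spaced frames that avoid the first and last 100 frames.
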
    @staticmethod
    def get_active_wheel_period(wheel, duration_range=(3., 20.), display=False):
        """
        Find period of active wheel movement.

        Attempts to find a period of movement where the wheel accelerates and decelerates for the
        wheel motion alignment QC.

        :param wheel: A Bunch of wheel timestamps and position data
        :param duration_range: The candidates must be within min/max duration range
        :param display: If true, plot the selected wheel movement
        :return: 2-element array comprising the start and end times of the active period
        """
        pos, ts = wh.interpolate_position(wheel.timestamps, wheel.position)
        v, acc = wh.velocity_filtered(pos, 1000)
        on, off, *_ = wh.movements(ts, acc, pos_thresh=.1, make_plots=False)
        edges = np.c_[on, off]
        indices, _ = np.where(np.logical_and(
            np.diff(edges) > duration_range[0], np.diff(edges) < duration_range[1]))
        if len(indices) == 0:
            _log.warning('No period of wheel movement found for motion alignment.')
            return None
        # Pick movement somewhere in the middle
        i = indices[int(indices.size / 2)]
        if display:
            _, (ax0, ax1) = plt.subplots(2, 1, sharex='all')
            mask = np.logical_and(ts > edges[i][0], ts < edges[i][1])
            ax0.plot(ts[mask], pos[mask])
            ax1.plot(ts[mask], acc[mask])
        return edges[i]

    def _set_sync(self, session_params=False):
        """Set the sync and sync_collection attributes if not already set.

        Also set the type attribute based on the sync. NB: The type attribute is for legacy sessions.

        Parameters
        ----------
        session_params : dict, bool
            The loaded experiment description file. If False, attempts to load it from the session_path.
        """
        if session_params is False:
            if not self.session_path:
                raise ValueError('No session path set')
            session_params = read_params(self.session_path)
        sync, sync_dict = get_sync(session_params)
        self.sync = self.sync or sync
        self.sync_collection = self.sync_collection or sync_dict.get('collection')
        if self.sync:
            self._type = 'ephys' if self.sync == 'nidq' else 'training'

    def _update_meta_from_session_params(self, sess_params):
        """
        Update the default expected video properties.

        Use properties defined in the experiment description file, if present. This updates the
        `video_meta` property with the fps, width and height for the type and camera label.

        Parameters
        ----------
        sess_params : dict
            The loaded experiment description file.
        """
        try:
            assert sess_params
            video_pars = sess_params.get('devices', {}).get('cameras', {}).get(self.label)
        except (AssertionError, KeyError):
            return
        PROPERTIES = ('width', 'height', 'fps')
        video_meta = copy.deepcopy(self.video_meta)  # must re-assign as it's a class attribute
        if self.type not in video_meta:
            video_meta[self.type] = {}
        if self.label not in video_meta[self.type]:
            video_meta[self.type][self.label] = {}
        video_meta[self.type][self.label].update(
            **{k: v for k, v in video_pars.items() if k in PROPERTIES}
        )
        self.video_meta = video_meta

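    # For reference, a hypothetical experiment description entry consumed above might look like
    # the following (YAML; key names follow the lookups in this module, values are illustrative):
    #
    #   devices:
    #     cameras:
    #       left:
    #         collection: raw_video_data
    #         fps: 60
    #         width: 1280
    #         height: 1024
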
    def run(self, update: bool = False, **kwargs) -> (str, dict):
        """
        Run video QC checks and return outcome.

        :param update: if True, updates the session QC fields on Alyx
        :param download_data: if True, downloads any missing data if required
        :param extract_times: if True, re-extracts the camera timestamps from the raw data
        :returns: the overall outcome as a spec.QC value, a dict of checks and their outcomes
        """
        _log.info(f'Computing QC outcome for {self.label} camera, session {self.eid}')
        namespace = f'video{self.label.capitalize()}'
        if all(x is None for x in self.data.values()):
            self.load_data(**kwargs)
        if self.data['frame_samples'] is None or self.data['timestamps'] is None:
            return spec.QC.NOT_SET, {}
        if self.data['timestamps'].shape[0] == 0:
            _log.error(f'No timestamps for {self.label} camera; setting outcome to CRITICAL')
            return spec.QC.CRITICAL, {}

        def is_metric(x):
            return isfunction(x) and x.__name__.startswith('check_')

        checks = getmembers(self.__class__, is_metric)
        checks = self._remove_check(checks)
        self.metrics = {f'_{namespace}_' + k[6:]: fn(self) for k, fn in checks}

        values = [x if isinstance(x, spec.QC) else x[0] for x in self.metrics.values()]
        outcome = max(map(spec.QC.validate, values))

        if update:
            extended = dict()
            for k, v in self.metrics.items():
                if v is None:
                    extended[k] = spec.QC.NOT_SET.name
                elif isinstance(v, tuple):
                    extended[k] = tuple(i.name if isinstance(i, spec.QC) else i for i in v)
                else:
                    extended[k] = v.name

            self.update_extended_qc(extended)
            self.update(outcome, namespace)
        return outcome, self.metrics

    def _remove_check(self, checks):
        """
        Remove one or more check functions from QC checklist.

        Parameters
        ----------
        checks : list of tuple
            The complete list of check function name and handle.

        Returns
        -------
        list of tuple
            The list of check function name and handle, sans names in `checks_to_remove` property.
        """
        if len(self.checks_to_remove) == 0:
            return checks
        else:
            for check in self.checks_to_remove:
                check_names = [ch[0] for ch in checks]
                idx = check_names.index(check)
                checks.pop(idx)
            return checks

    def check_brightness(self, bounds=(40, 200), max_std=20, roi=True, display=False):
        """Check that the video brightness is within a given range.

        The mean brightness of each frame must be within the bounds provided, and the standard
        deviation across sample frames should be less than the given value. Assumes that the
        frame samples are 2D (no colour channels).

        :param bounds: For each frame, check that: bounds[0] < M < bounds[1],
        where M = mean(frame). If fewer than 75% of sample frames are within these bounds, the
        outcome is WARNING. If fewer than 75% of frames are within twice the bounds, the
        outcome is FAIL.
        :param max_std: The standard deviation of the frame luminance means must be less than this
        :param roi: If True, check brightness on ROI of frame
        :param display: When True the mean frame luminance is plotted against sample frames.
        The sample frames with the lowest and highest mean luminance are shown.
        """
        if self.data['frame_samples'] is None:
            return spec.QC.NOT_SET
        if roi is True:
            _, h, w = self.data['frame_samples'].shape
            if self.label == 'body':  # Latter half
                roi = (slice(None), slice(None), slice(int(w / 2), None, None))
            elif self.label == 'left':  # Top left quadrant (~2/3 width, 1/2 height)
                roi = (slice(None), slice(None, int(h / 2), None), slice(None, int(w * .66), None))
            else:  # Top right quadrant (~2/3 width, 1/2 height)
                roi = (slice(None), slice(None, int(h / 2), None), slice(int(w * .66), None, None))
        else:
            roi = (slice(None), slice(None), slice(None))
        brightness = self.data['frame_samples'][roi].mean(axis=(1, 2))

        if display:
            f = plt.figure()
            gs = f.add_gridspec(2, 3)
            indices = self.frame_samples_idx
            # Plot mean frame luminance
            ax = f.add_subplot(gs[:2, :2])
            plt.plot(indices, brightness, label='brightness')
            ax.set(
                xlabel='frame #',
                ylabel='brightness (mean pixel)',
                title='Brightness')
            ax.hlines(bounds, 0, indices[-1],
                      colors='tab:orange', linestyles=':', label='warning bounds')
            ax.hlines((bounds[0] / 2, bounds[1] * 2), 0, indices[-1],
                      colors='r', linestyles=':', label='failure bounds')
            ax.legend()
            # Plot min-max frames
            for i, idx in enumerate((np.argmax(brightness), np.argmin(brightness))):
                a = f.add_subplot(gs[i, 2])
                ax.annotate('*', (indices[idx], brightness[idx]),  # this is the point to label
                            textcoords='offset points', xytext=(0, 1), ha='center')
                frame = self.data['frame_samples'][idx][roi[1:]]
                title = ('min' if i else 'max') + ' mean luminance = %.2f' % brightness[idx]
                self.imshow(frame, ax=a, title=title)

        PCT_PASS = .75  # Proportion of sample frames that must pass
        # Warning if brightness not within range (3/4 of frames must be between bounds)
        warn_range = np.logical_and(brightness > bounds[0], brightness < bounds[1])
        warn_range = 'PASS' if sum(warn_range) / self.n_samples > PCT_PASS else 'WARNING'
        # Fail if brightness not within twice the range or std less than threshold
        fail_range = np.logical_and(brightness > bounds[0] / 2, brightness < bounds[1] * 2)
        within_range = sum(fail_range) / self.n_samples > PCT_PASS
        fail_range = 'PASS' if within_range and np.std(brightness) < max_std else 'FAIL'
        return self.overall_outcome([warn_range, fail_range])

    def check_file_headers(self):
        """Check reported frame rate matches FPGA frame rate."""
        if None in (self.data['video'], self.video_meta):
            return spec.QC.NOT_SET
        expected = self.video_meta[self.type][self.label]
        return spec.QC.PASS if self.data['video']['fps'] == expected['fps'] else spec.QC.FAIL

    def check_framerate(self, threshold=1.):
        """Check camera times match specified frame rate for camera.

        :param threshold: The maximum absolute difference between timestamp sample rate and video
        frame rate. NB: Does not take into account dropped frames.
        """
        if any(x is None for x in (self.data['timestamps'], self.video_meta)):
            return spec.QC.NOT_SET
        fps = self.video_meta[self.type][self.label]['fps']
        Fs = 1 / np.median(np.diff(self.data['timestamps']))  # Approx. frequency of camera
        return spec.QC.PASS if abs(Fs - fps) < threshold else spec.QC.FAIL, float(round(Fs, 3))

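    # Worked example of the check above (illustrative numbers, not from a real session):
    # timestamps spaced a median of ~16.67 ms apart give Fs = 1 / 0.01667 ≈ 60.0 Hz, so for a
    # 60 fps camera |Fs - fps| ≈ 0 < 1 and the check returns (PASS, 60.0).
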
    def check_pin_state(self, display=False):
        """Check the pin state reflects Bpod TTLs."""
        if not data_for_keys(('video', 'pin_state', 'audio'), self.data):
            return spec.QC.NOT_SET
        size_diff = int(self.data['pin_state'].shape[0] - self.data['video']['length'])
        # NB: The pin state can be high for 2 consecutive frames
        low2high = np.insert(np.diff(self.data['pin_state'][:, -1].astype(int)) == 1, 0, False)
        # NB: Time between two consecutive TTLs can be sub-frame, so this will fail
        ndiff_low2high = int(self.data['audio'][::2].size - sum(low2high))
        # Check within ms of audio times
        if display:
            plt.Figure()
            plt.plot(self.data['timestamps'][low2high], np.zeros(sum(low2high)), 'o',
                     label='GPIO Low -> High')
            plt.plot(self.data['audio'], np.zeros(self.data['audio'].size), 'rx',
                     label='Audio TTL High')
            plt.xlabel('FPGA frame times / s')
            plt.gca().set(yticklabels=[])
            plt.gca().tick_params(left=False)
            plt.legend()

        outcome = self.overall_outcome(
            ('PASS' if size_diff == 0 else 'WARNING' if np.abs(size_diff) < 5 else 'FAIL',
             'PASS' if np.abs(ndiff_low2high) < 5 else 'WARNING')
        )
        return outcome, ndiff_low2high, size_diff

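    # Illustration of the edge detection above (assumed pin states, derived from the code):
    # pin_state[:, -1] = [0, 0, 1, 1, 0, 1] -> np.diff(...) == 1 at positions 1 and 4, so
    # low2high = [False, False, True, False, False, True], i.e. the pin went high on frames 2 and 5.
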
    def check_dropped_frames(self, threshold=.1):
        """Check how many frames were reported missing.

        :param threshold: The maximum allowable percentage of dropped frames
        """
        if not data_for_keys(('video', 'count'), self.data):
            return spec.QC.NOT_SET
        size_diff = int(self.data['count'].size - self.data['video']['length'])
        # Keep the boolean array so the number of affected frames can be reported
        strict_increase = np.diff(self.data['count']) > 0
        if not np.all(strict_increase):
            n_affected = np.sum(np.invert(strict_increase))
            _log.info(f'frame count not strictly increasing: '
                      f'{n_affected} frames affected ({n_affected / strict_increase.size:.2%})')
            return spec.QC.CRITICAL
        dropped = np.diff(self.data['count']).astype(int) - 1
        pct_dropped = (sum(dropped) / len(dropped) * 100)
        # Calculate overall outcome for this check
        outcome = self.overall_outcome(
            ('PASS' if size_diff == 0 else 'WARNING' if np.abs(size_diff) < 5 else 'FAIL',
             'PASS' if pct_dropped < threshold else 'FAIL')
        )
        return outcome, int(sum(dropped)), size_diff

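    # Worked example of the dropped-frame arithmetic (assumed counter values):
    # count = [0, 1, 2, 5, 6] -> np.diff(count) - 1 = [0, 0, 2, 0], so 2 frames were dropped
    # and pct_dropped = 2 / 4 * 100 = 50%, well over the 0.1% default threshold (FAIL).
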
    def check_timestamps(self):
        """Check that the camera.times array is reasonable."""
        if not data_for_keys(('timestamps', 'video'), self.data):
            return spec.QC.NOT_SET
        # Check number of timestamps matches video
        length_matches = self.data['timestamps'].size == self.data['video'].length
        # Check times are strictly increasing
        increasing = all(np.diff(self.data['timestamps']) > 0)
        # Check times do not contain nans
        nanless = not np.isnan(self.data['timestamps']).any()
        return spec.QC.PASS if increasing and length_matches and nanless else spec.QC.FAIL

    def check_camera_times(self):
        """Check that the number of raw camera timestamps matches the number of video frames."""
        if not data_for_keys(('bonsai_times', 'video'), self.data):
            return spec.QC.NOT_SET
        length_match = len(self.data['camera_times']) == self.data['video'].length
        outcome = spec.QC.PASS if length_match else spec.QC.WARNING
        return outcome, len(self.data['camera_times']) - self.data['video'].length

    def check_resolution(self):
        """Check that the timestamps and video file resolution match what we expect."""
        if self.data['video'] is None:
            return spec.QC.NOT_SET
        actual = self.data['video']
        expected = self.video_meta[self.type][self.label]
        match = actual['width'] == expected['width'] and actual['height'] == expected['height']
        return spec.QC.PASS if match else spec.QC.FAIL

    def check_wheel_alignment(self, tolerance=(1, 2), display=False):
        """Check wheel motion in video correlates with the rotary encoder signal.

        Check is skipped for body camera videos as the wheel is often obstructed.

        Parameters
        ----------
        tolerance : int, (int, int)
            Maximum absolute offset in frames. If two values, the maximum value is taken as the
            warning threshold.
        display : bool
            If true, the wheel motion energy is plotted against the rotary encoder.

        Returns
        -------
        one.alf.spec.QC
            The outcome, one of {'NOT_SET', 'FAIL', 'WARNING', 'PASS'}.
        int
            Frame offset, i.e. by how many frames the video was shifted to match the rotary encoder
            signal. Negative values mean the video was shifted backwards with respect to the wheel
            timestamps.

        Notes
        -----
        - A negative frame offset typically means that there were frame TTLs at the beginning that
          do not correspond to any video frames (sometimes the first few frames aren't saved to
          disk). Since 2021-09-15 the extractor should compensate for this.
        """
        wheel_present = data_for_keys(('position', 'timestamps', 'period'), self.data['wheel'])
        if not wheel_present or self.label == 'body':
            return spec.QC.NOT_SET

        # Check the selected wheel movement period occurred within camera timestamp time
        camera_times = self.data['timestamps']
        in_range = within_ranges(camera_times, self.data['wheel']['period'].reshape(-1, 2))
        if not in_range.any():
            # Check if any camera timestamps overlap with the wheel times
            if np.any(np.logical_and(
                    camera_times > self.data['wheel']['timestamps'][0],
                    camera_times < self.data['wheel']['timestamps'][-1])
            ):
                _log.warning('Unable to check wheel alignment: '
                             'chosen movement is not during video')
                return spec.QC.NOT_SET
            else:
                # No overlap, return fail
                return spec.QC.FAIL
        aln = MotionAlignment(self.eid, self.one, self.log, session_path=self.session_path)
        aln.data = self.data.copy()
        aln.data['camera_times'] = {self.label: camera_times}
        aln.video_paths = {self.label: self.video_path}
        offset, *_ = aln.align_motion(period=self.data['wheel'].period,
                                      display=display, side=self.label)
        if offset is None:
            return spec.QC.NOT_SET
        if display:
            aln.plot_alignment()

        # Determine the outcome. If there are two values for the tolerance, one is taken to be
        # a warning threshold, the other a failure threshold.
        out_map = {0: spec.QC.WARNING, 1: spec.QC.WARNING, 2: spec.QC.PASS}  # 0: FAIL -> WARNING Aug 2022
        passed = np.abs(offset) <= np.sort(np.array(tolerance))
        return out_map[sum(passed)], int(offset)

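    # Illustration of the tolerance logic above: with tolerance=(1, 2), an offset of 0 or ±1
    # frame satisfies both thresholds (sum(passed)=2 -> PASS); an offset of ±2 satisfies only
    # the larger threshold (sum(passed)=1 -> WARNING); anything larger satisfies neither
    # (sum(passed)=0 -> WARNING, formerly FAIL per the Aug 2022 note).
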
    def check_position(self, hist_thresh=(75, 80), pos_thresh=(10, 15),
                       metric=cv2.TM_CCOEFF_NORMED,
                       display=False, test=False, roi=None, pct_thresh=True):
        """Check camera is positioned correctly.

        For the template matching zero-normalized cross-correlation (default) should be more
        robust to exposure (which we're not checking here). The L2 norm (TM_SQDIFF) should
        also work.

        If display is True, the template ROI (pink hashed) is plotted over a video frame,
        along with the threshold regions (green solid). The histogram correlations are plotted
        and the full histogram is plotted for one of the sample frames and the reference frame.

        :param hist_thresh: The minimum histogram cross-correlation threshold to pass (0-1).
        :param pos_thresh: The maximum number of pixels that the template matcher may be off
        by. If two values are provided, the lower threshold is treated as a warning boundary.
        :param metric: The metric to use for template matching.
        :param display: If true, the results are plotted
        :param test: If true, a reference frame is used instead of the frames in frame_samples.
        :param roi: A tuple of indices for the face template in the form ((y1, y2), (x1, x2))
        :param pct_thresh: If true, the thresholds are treated as percentages
        """
        if not test and self.data['frame_samples'] is None:
            return spec.QC.NOT_SET
        refs = self.load_reference_frames(self.label)
        # ensure iterable
        pos_thresh = np.sort(np.array(pos_thresh))
        hist_thresh = np.sort(np.array(hist_thresh))

        # Method 1: histogram comparison against the reference frame
        ref_h = cv2.calcHist([refs[0]], [0], None, [256], [0, 256])
        frames = refs if test else self.data['frame_samples']
        hists = [cv2.calcHist([x], [0], None, [256], [0, 256]) for x in frames]
        corr = np.array([cv2.compareHist(test_h, ref_h, cv2.HISTCMP_CORREL) for test_h in hists])
        if pct_thresh:
            corr *= 100
        hist_passed = [np.all(corr > x) for x in hist_thresh]

        # Method 2: template matching
        top_left, roi, template = self.find_face(roi=roi, test=test, metric=metric, refs=refs)
        (y1, y2), (x1, x2) = roi
        err = (x1, y1) - np.median(np.array(top_left), axis=0)
        h, w = frames[0].shape[:2]

        if pct_thresh:  # Threshold as percent
            err_pct = [(abs(x) / y) * 100 for x, y in zip(err, (h, w))]
            face_passed = [all(err_pct < x) for x in pos_thresh]
        else:
            face_passed = [np.all(np.abs(err) < x) for x in pos_thresh]

        if display:
            plt.figure()
            # Plot frame with template overlay
            img = frames[0]
            ax0 = plt.subplot(221)
            ax0.imshow(img, cmap='gray', vmin=0, vmax=255)
            bounds = (x1 - err[0], x2 - err[0], y2 - err[1], y1 - err[1])
            ax0.imshow(template, cmap='gray', alpha=0.5, extent=bounds)
            if pct_thresh:
                for c, thresh in zip(('green', 'yellow'), pos_thresh):
                    t_y = (h / 100) * thresh
                    t_x = (w / 100) * thresh
                    xy = (x1 - t_x, y1 - t_y)
                    ax0.add_patch(Rectangle(xy, x2 - x1 + (t_x * 2), y2 - y1 + (t_y * 2),
                                            fill=True, facecolor=c, lw=0, alpha=0.05))
            else:
                for c, thresh in zip(('green', 'yellow'), pos_thresh):
                    xy = (x1 - thresh, y1 - thresh)
                    ax0.add_patch(Rectangle(xy, x2 - x1 + (thresh * 2), y2 - y1 + (thresh * 2),
                                            fill=True, facecolor=c, lw=0, alpha=0.05))
            xy = (x1 - err[0], y1 - err[1])
            ax0.add_patch(Rectangle(xy, x2 - x1, y2 - y1,
                                    edgecolor='pink', fill=False, hatch='//', lw=1))
            ax0.set(xlim=(0, img.shape[1]), ylim=(img.shape[0], 0))
            ax0.set_axis_off()
            # Plot the image histograms
            ax1 = plt.subplot(212)
            ax1.plot(ref_h[5:-1], label='reference frame')
            ax1.plot(np.array(hists).mean(axis=0)[5:-1], label='mean frame')
            ax1.set_xlim([0, 256])
            plt.legend()
            # Plot the correlations for each sample frame
            ax2 = plt.subplot(222)
            ax2.plot(corr, label='hist correlation')
            ax2.axhline(hist_thresh[0], 0, self.n_samples,
                        linestyle=':', color='r', label='fail threshold')
            ax2.axhline(hist_thresh[1], 0, self.n_samples,
                        linestyle=':', color='g', label='pass threshold')
            ax2.set(xlabel='Sample Frame #', ylabel='Hist correlation')
            plt.legend()
            plt.suptitle('Check position')
            plt.show()

        pass_map = {i: s for i, s in enumerate(('FAIL', 'WARNING', 'PASS'))}
        face_aligned = pass_map[sum(face_passed)]
        hist_correlates = pass_map[sum(hist_passed)]

        return self.overall_outcome([face_aligned, hist_correlates], agg=min)

    def check_focus(self, n=20, threshold=(100, 6),
                    roi=False, display=False, test=False, equalize=True):
        """Check video is in focus.

        Two methods are used here: looking at the high frequencies with a DFT and
        applying a Laplacian HPF and looking at the variance.

        Note:
        - Both methods are sensitive to noise (Laplacian is a 2nd order filter).
        - The thresholds for the fft may need to be different for the left/right vs body as
          the distribution of frequencies in the image is different (e.g. the holder
          comprises mostly very high frequencies).
        - The image may be overall in focus but the places we care about can still be out of
          focus (namely the face). For this we'll take an ROI around the face.
        - The focus check can be thrown off by brightness. This may be fixed by equalizing the
          histogram (set equalize=True).

        Parameters
        ----------
        n : int
            Number of frames from frame_samples data to use in check.
        threshold : tuple of float
            The lower boundary for Laplacian variance and mean FFT filtered brightness,
            respectively.
        roi : bool, None, list of slice
            If False, the roi is determined via template matching for the face or body.
            If None, some set ROIs for face and paws are used. A list of slices may also be passed.
        display : bool
            If true, the results are displayed.
        test : bool
            If true, a set of artificially blurred reference frames are used as the input. This can
            be used for selecting reasonable thresholds.
        equalize : bool
            If true, the histograms of the frames are equalized, resulting in increased global
            contrast and a linear CDF. This makes the check robust to low light conditions.

        Returns
        -------
        one.alf.spec.QC
            The QC outcome, either FAIL or PASS.
        """
        no_frames = self.data['frame_samples'] is None or len(self.data['frame_samples']) == 0
        if not test and no_frames:
            return spec.QC.NOT_SET

        if roi is False:
            top_left, roi, _ = self.find_face(test=test)  # (y1, y2), (x1, x2)
            h, w = map(lambda x: np.diff(x).item(), roi)
            x, y = np.median(np.array(top_left), axis=0).round().astype(int)
            roi = (np.s_[y: y + h, x: x + w],)
        else:
            ROI = {
                'left': (np.s_[:400, :561], np.s_[500:, 100:800]),  # (face, wheel)
                'right': (np.s_[:196, 397:], np.s_[221:, 255:]),
                'body': (np.s_[143:274, 84:433],)  # body holder
            }
            roi = roi or ROI[self.label]

        if test:
            # In test mode load a reference frame and run it through a normalized box filter
            # with increasing kernel size.
            idx = (0,)
            ref = self.load_reference_frames(self.label)[idx]
            kernel_sz = np.unique(np.linspace(0, 15, n, dtype=int))
            n = kernel_sz.size  # Size excluding repeated kernels
            img = np.empty((n, *ref.shape), dtype=np.uint8)
            for i, k in enumerate(kernel_sz):
                img[i] = ref.copy() if k == 0 else cv2.blur(ref, (k, k))
            if equalize:
                [cv2.equalizeHist(x, x) for x in img]
            if display:
                # Plot blurred images
                f, axes = plt.subplots(1, len(kernel_sz))
                for ax, ig, k in zip(axes, img, kernel_sz):
                    self.imshow(ig, ax=ax, title='Kernel ({0}, {0})'.format(k or 'None'))
                f.suptitle('Reference frame with box filter')
        else:
            # Sub-sample the frame samples
            idx = np.unique(np.linspace(0, len(self.data['frame_samples']) - 1, n, dtype=int))
            img = self.data['frame_samples'][idx]
            if equalize:
                [cv2.equalizeHist(x, x) for x in img]

        # A measure of the sharpness effectively taking the second derivative of the image
        lpc_var = np.empty((min(n, len(img)), len(roi)))
        for i, frame in enumerate(img[::-1]):
            lpc = cv2.Laplacian(frame, cv2.CV_16S, ksize=1)
            lpc_var[i] = [lpc[mask].var() for mask in roi]

        if display:
            # Plot the first sample image
            f = plt.figure()
            gs = f.add_gridspec(len(roi) + 1, 3)
            f.add_subplot(gs[0:len(roi), 0])
            frame = img[0]
            self.imshow(frame, title=f'Frame #{self.frame_samples_idx[idx[0]]}')
            # Plot the ROIs with and without filter
            lpc = cv2.Laplacian(frame, cv2.CV_16S, ksize=1)
            abs_lpc = cv2.convertScaleAbs(lpc)
            for i, r in enumerate(roi):
                f.add_subplot(gs[i, 1])
                self.imshow(frame[r], title=f'ROI #{i + 1}')
                f.add_subplot(gs[i, 2])
                self.imshow(abs_lpc[r], title=f'ROI #{i + 1} - Laplacian filter')
            f.suptitle('Laplacian blur detection')
            # Plot variance over frames
            ax = f.add_subplot(gs[len(roi), :])
            ln = plt.plot(lpc_var)
            [l.set_label(f'ROI #{i + 1}') for i, l in enumerate(ln)]
            ax.axhline(threshold[0], 0, n, linestyle=':', color='r', label='lower threshold')
            ax.set(xlabel='Frame sample', ylabel='Variance of the Laplacian')
            plt.tight_layout()
            plt.legend()

        # Second test is to highpass with dft
        h, w = img.shape[1:]
        cX, cY = w // 2, h // 2
        sz = 60  # Seems to be the magic number for high pass
        mask = np.ones((h, w, 2), bool)
        mask[cY - sz:cY + sz, cX - sz:cX + sz] = False
        filt_mean = np.empty(len(img))
        for i, frame in enumerate(img[::-1]):
            dft = cv2.dft(np.float32(frame), flags=cv2.DFT_COMPLEX_OUTPUT)
            f_shift = np.fft.fftshift(dft) * mask  # Shift & remove low frequencies
            f_ishift = np.fft.ifftshift(f_shift)  # Shift back
            filt_frame = cv2.idft(f_ishift)  # Reconstruct
            filt_frame = cv2.magnitude(filt_frame[..., 0], filt_frame[..., 1])
            # Re-normalize to 8-bits to make threshold simpler
            img_back = cv2.normalize(filt_frame, None, alpha=0, beta=256,
                                     norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
            filt_mean[i] = np.mean(img_back)
            if i == len(img) - 1 and display:
                # Plot Fourier transforms
                f = plt.figure()
                gs = f.add_gridspec(2, 3)
                self.imshow(img[0], ax=f.add_subplot(gs[0, 0]), title='Original frame')
                dft_shift = np.fft.fftshift(dft)
                magnitude = 20 * np.log(cv2.magnitude(dft_shift[..., 0], dft_shift[..., 1]))
                self.imshow(magnitude, ax=f.add_subplot(gs[0, 1]), title='Magnitude spectrum')
                self.imshow(img_back, ax=f.add_subplot(gs[0, 2]), title='Filtered frame')
                ax = f.add_subplot(gs[1, :])
                ax.plot(filt_mean)
                ax.axhline(threshold[1], 0, n, linestyle=':', color='r', label='lower threshold')
                ax.set(xlabel='Frame sample', ylabel='Mean of filtered frame')
                f.suptitle('Discrete Fourier Transform')
                plt.show()
        passes = np.all(lpc_var > threshold[0]) or np.all(filt_mean > threshold[1])
        return spec.QC.PASS if passes else spec.QC.FAIL

    def find_face(self, roi=None, test=False, metric=cv2.TM_CCOEFF_NORMED, refs=None):
        """Use template matching to find face location in frame.

        For the template matching zero-normalized cross-correlation (default) should be more
        robust to exposure (which we're not checking here). The L2 norm (TM_SQDIFF) should
        also work. That said, normalizing the histograms works best.

        :param roi: A tuple of indices for the face template in the form ((y1, y2), (x1, x2))
        :param test: If True the template is matched against frames that come from the same session
        :param metric: The metric to use for template matching
        :param refs: An array of frames to match the template to

        :returns: a list of (x, y) top-left matches, the ((y1, y2), (x1, x2)) ROI used, and the
        template image
        """
        ROI = {
            'left': ((45, 346), (138, 501)),
            'right': ((14, 174), (430, 618)),
            'body': ((141, 272), (90, 339))
        }
        roi = roi or ROI[self.label]
        refs = self.load_reference_frames(self.label) if refs is None else refs

        frames = refs if test else self.data['frame_samples']
        template = refs[0][tuple(slice(*r) for r in roi)]
        top_left = []  # [(x1, y1), ...]
        for frame in frames:
            res = cv2.matchTemplate(frame, template, metric)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
            top_left.append(min_loc if metric < 2 else max_loc)
        return top_left, roi, template

    @staticmethod
    def load_reference_frames(side):
        """Load some reference frames for a given video.

        The reference frames are from sessions where the camera was well positioned. The
        frames are in qc/reference, one file per camera, only one channel per frame. The
        session eids can be found in qc/reference/frame_src.json.

        :param side: Video label, e.g. 'left'
        :return: numpy array of frames with the shape (n, h, w)
        """
        file = next(Path(__file__).parent.joinpath('reference').glob(f'frames_{side}.npy'))
        refs = np.load(file)
        return refs

    @staticmethod
    def imshow(frame, ax=None, title=None, **kwargs):
        """plt.imshow with some convenient defaults for greyscale frames."""
        h = ax or plt.gca()
        defaults = {
            'cmap': kwargs.pop('cmap', 'gray'),
            'vmin': kwargs.pop('vmin', 0),
            'vmax': kwargs.pop('vmax', 255)
        }
        h.imshow(frame, **defaults, **kwargs)
        h.set(title=title)
        h.set_axis_off()
        return h  # return the axes actually drawn on (ax may be None)


class CameraQCCamlog(CameraQC):
    """
    A class for computing camera QC metrics from camlog data.

    For this QC we expect the check_pin_state to be NOT_SET as we are not using the GPIO for
    timestamp alignment.
    """

    dstypes = [
        '_iblrig_taskData.raw',
        '_iblrig_taskSettings.raw',
        '_iblrig_Camera.raw',
        'camera.times',
        'wheel.position',
        'wheel.timestamps'
    ]
    dstypes_fpga = [
        '_spikeglx_sync.channels',
        '_spikeglx_sync.polarities',
        '_spikeglx_sync.times',
        'DAQData.raw.meta',
        'DAQData.wiring'
    ]

    def __init__(self, session_path_or_eid, camera, sync_collection='raw_sync_data', sync_type='nidq', **kwargs):
        """Compute camera QC metrics from camlog data.

        For this QC we expect the check_pin_state to be NOT_SET as we are not using the GPIO for
        timestamp alignment.
        """
        super().__init__(session_path_or_eid, camera, sync_collection=sync_collection, sync_type=sync_type, **kwargs)
        self._type = 'ephys'
        self.checks_to_remove = ['check_pin_state']

    def load_data(self, extract_times: bool = False, load_video: bool = True, **kwargs) -> None:
        """Extract the data from raw data files.

        Extracts all the required task data from the raw data files.

        Data keys:
            - count (int array): the sequential frame number (n, n+1, n+2...)
            - pin_state (array): the camera GPIO pin; records the audio TTLs; should be one per frame
            - audio (float array): timestamps of audio TTL fronts
            - fpga_times (float array): timestamps of camera TTLs recorded by FPGA
            - timestamps (float array): extracted video timestamps (the camera.times ALF)
            - bonsai_times (datetime array): system timestamps of video PC; should be one per frame
            - camera_times (float array): camera frame timestamps extracted from frame headers
            - wheel (Bunch): rotary encoder timestamps, position and period used for wheel motion
            - video (Bunch): video meta data, including dimensions and FPS
            - frame_samples (h x w x n array): array of evenly sampled frames (1 colour channel)

        Missing data will raise an AssertionError.
        :param extract_times: if True, the camera.times are re-extracted from the raw data
        :param load_video: if True, calls the load_video_data method
        """
        assert self.session_path, 'no session path set'
        _log.info('Gathering data for QC')

        # If there is an experiment description and there are video parameters
        sess_params = read_params(self.session_path) or {}
        video_collection = get_video_collection(sess_params, self.label)
        task_collection = get_task_collection(sess_params)
        self._set_sync(sess_params)
        self._update_meta_from_session_params(sess_params)

        # Get frame count
        log, _ = parse_cam_log(self.session_path.joinpath(video_collection, f'_iblrig_{self.label}Camera.raw.camlog'))
        self.data['count'] = log.frame_id.values

        # Load the audio and raw FPGA times
        sync = chmap = None
        if self.sync != 'bpod' and self.sync is not None:
            sync, chmap = ephys_fpga.get_sync_and_chn_map(self.session_path, self.sync_collection)
            audio_ttls = ephys_fpga.get_sync_fronts(sync, chmap['audio'])
            self.data['audio'] = audio_ttls['times']  # Get rises
            # Load raw FPGA times
            cam_ts = camio.extract_camera_sync(sync, chmap)
            self.data['fpga_times'] = cam_ts[self.label]
        else:
            bpod_data = raw.load_data(self.session_path, task_collection=task_collection)
            _, audio_ttls = raw.load_bpod_fronts(self.session_path, data=bpod_data, task_collection=task_collection)
            self.data['audio'] = audio_ttls['times']

        # Load extracted frame times
        alf_path = self.session_path / 'alf'
        try:
            assert not extract_times
            cam_path = next(alf_path.rglob(f'*{self.label}Camera.times*')).parent
            self.data['timestamps'] = alfio.load_object(
                cam_path, f'{self.label}Camera', short_keys=True)['times']
        except AssertionError:  # Re-extract
            kwargs = dict(video_path=self.video_path, save=False)
            if self.sync == 'bpod':
                extractor = camio.CameraTimestampsBpod(self.session_path, task_collection=task_collection)
            else:
                kwargs.update(sync=sync, chmap=chmap)
                extractor = camio.CameraTimestampsCamlog(self.label, self.session_path)
            outputs, _ = extractor.extract(**kwargs)
            self.data['timestamps'] = outputs[f'{self.label}_camera_timestamps']
        except ALFObjectNotFound:
            _log.warning('no camera.times ALF found for session')

        # Get audio and wheel data
        wheel_keys = ('timestamps', 'position')
        try:
            # glob in case wheel data are in sub-collections
            wheel_path = next(alf_path.rglob('*wheel.timestamps*')).parent
            self.data['wheel'] = alfio.load_object(wheel_path, 'wheel', short_keys=True)
        except (StopIteration, ALFObjectNotFound):
            # Extract from raw data
            if self.sync != 'bpod':
                wheel_data = ephys_fpga.extract_wheel_sync(sync, chmap)
            else:
                wheel_data = training_wheel.get_wheel_position(self.session_path, task_collection=task_collection)
            self.data['wheel'] = Bunch(zip(wheel_keys, wheel_data))

        # Find short period of wheel motion for motion correlation.
        if data_for_keys(wheel_keys, self.data['wheel']) and self.data['timestamps'] is not None:
            self.data['wheel'].period = self.get_active_wheel_period(self.data['wheel'])

        # Load in camera times
        self.data['camera_times'] = log.timestamp.values

        # Gather information from video file
        if load_video:
            _log.info('Inspecting video file...')
            self.load_video_data()

    def check_camera_times(self):
        """Check that the number of raw camera timestamps matches the number of video frames."""
        if not data_for_keys(('camera_times', 'video'), self.data):
            return spec.QC.NOT_SET
        length_match = len(self.data['camera_times']) == self.data['video'].length
        outcome = spec.QC.PASS if length_match else spec.QC.WARNING
        return outcome, len(self.data['camera_times']) - self.data['video'].length


def data_for_keys(keys, data):
    """Check keys exist in 'data' dict and contain values other than None."""
    return data is not None and all(k in data and data.get(k, None) is not None for k in keys)

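# A minimal illustration of `data_for_keys` (made-up inputs, behaviour derived from the code above):
# >>> data_for_keys(('a', 'b'), {'a': 1, 'b': 2})
# True
# >>> data_for_keys(('a', 'b'), {'a': 1, 'b': None})  # key present but value is None
# False
# >>> data_for_keys(('a',), None)
# False

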
def get_task_collection(sess_params):
    """
    Return the first non-passive task collection.

    Returns the first task collection from the experiment description whose task name does not
    contain 'passive', otherwise returns 'raw_behavior_data'.

    Parameters
    ----------
    sess_params : dict
        The loaded experiment description file.

    Returns
    -------
    str:
        The collection presumed to contain wheel data.
    """
    sess_params = sess_params or {}
    tasks = (chain(*map(dict.items, sess_params.get('tasks', []))))
    return next((v['collection'] for k, v in tasks if 'passive' not in k), 'raw_behavior_data')

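# Example with a hypothetical experiment description (task names and collections are
# illustrative, not from a real session):
# >>> sess_params = {'tasks': [
# ...     {'passiveChoiceWorld': {'collection': 'raw_passive_data'}},
# ...     {'ephysChoiceWorld': {'collection': 'raw_task_data_00'}}]}
# >>> get_task_collection(sess_params)  # first task without 'passive' in its name
# 'raw_task_data_00'
# >>> get_task_collection(None)
# 'raw_behavior_data'

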
def get_video_collection(sess_params, label):
    """
    Return the collection containing the raw video data for a given camera.

    Parameters
    ----------
    sess_params : dict
        The loaded experiment description file.
    label : str
        The camera label.

    Returns
    -------
    str:
        The collection presumed to contain the video data.
    """
    DEFAULT = 'raw_video_data'
    value = sess_params or {}
    for key in ('devices', 'cameras', label, 'collection'):
        value = value.get(key)
        if not value:
            return DEFAULT
    return value

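# Example with a hypothetical experiment description (collection name is illustrative):
# >>> sess_params = {'devices': {'cameras': {'left': {'collection': 'raw_video_data_00'}}}}
# >>> get_video_collection(sess_params, 'left')
# 'raw_video_data_00'
# >>> get_video_collection({}, 'right')  # any missing key falls back to the default
# 'raw_video_data'

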
def run_all_qc(session, cameras=('left', 'right', 'body'), **kwargs):
    """Run QC for all cameras.

    Run the camera QC for left, right and body cameras.

    :param session: A session path or eid.
    :param update: If True, QC fields are updated on Alyx.
    :param cameras: A list of camera names to perform QC on.
    :param stream: If true and local video files not available, the data are streamed from
    the remote source.
    :return: dict of CameraQC objects
    """
    qc = {}
    camlog = kwargs.pop('camlog', False)
    CamQC = CameraQCCamlog if camlog else CameraQC

    run_args = {k: kwargs.pop(k) for k in ('extract_times', 'update')
                if k in kwargs.keys()}
    for camera in cameras:
        qc[camera] = CamQC(session, camera, **kwargs)
        qc[camera].run(**run_args)
    return qc
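
# A minimal usage sketch (assumes a valid session path or eid and, for remote data, an ONE
# instance passed through the kwargs; names are illustrative):
# >>> qcs = run_all_qc(session_path, cameras=('left',), update=False)
# >>> qcs['left'].outcome  # overall QC outcome for the left camera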