Coverage for ibllib/qc/camera.py: 83% (591 statements)
1"""Video quality control.
3This module runs a list of quality control metrics on the camera and extracted video data.
5Examples
6--------
7Run right camera QC, downloading all but video file
9>>> qc = CameraQC(eid, 'right', download_data=True, stream=True)
10>>> qc.run()
12Run left camera QC with session path, update QC field in Alyx
14>>> qc = CameraQC(session_path, 'left')
15>>> outcome, extended = qc.run(update=True) # Returns outcome of videoQC only
16>>> print(f'video QC = {outcome}; overall session QC = {qc.outcome}') # NB difference outcomes
18Run only video QC (no timestamp/alignment checks) on 20 frames for the body camera
20>>> qc = CameraQC(eid, 'body', n_samples=20)
21>>> qc.load_video_data() # Quicker than loading all data
22>>> qc.run()
24Run specific video QC check and display the plots
26>>> qc = CameraQC(eid, 'left')
27>>> qc.load_data(download_data=True)
28>>> qc.check_position(display=True) # NB: Not all checks make plots
30Run the QC for all cameras
32>>> qcs = run_all_qc(eid)
33>>> qcs['left'].metrics # Dict of checks and outcomes for left camera
34"""
import logging
from inspect import getmembers, isfunction
from pathlib import Path
from itertools import chain
import copy

import cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle

import one.alf.io as alfio
from one.alf import spec
from one.alf.exceptions import ALFObjectNotFound
from iblutil.util import Bunch
from iblutil.numerical import within_ranges

import ibllib.io.extractors.camera as camio
from ibllib.io.extractors import ephys_fpga, training_wheel, mesoscope
from ibllib.io.extractors.video_motion import MotionAlignment
from ibllib.io import raw_data_loaders as raw
from ibllib.io.raw_daq_loaders import load_timeline_sync_and_chmap
from ibllib.io.session_params import read_params, get_sync, get_sync_namespace
import brainbox.behavior.wheel as wh
from ibllib.io.video import get_video_meta, get_video_frames_preload, assert_valid_label
from . import base

_log = logging.getLogger(__name__)

try:
    from labcams import parse_cam_log
except ImportError:
    _log.warning('labcams not installed')


class CameraQC(base.QC):
    """A class for computing camera QC metrics."""

    dstypes = [
        '_ibl_experiment.description',
        '_iblrig_Camera.frameData',  # Replaces the next 3 datasets
        '_iblrig_Camera.frame_counter',
        '_iblrig_Camera.GPIO',
        '_iblrig_Camera.timestamps',
        '_iblrig_taskData.raw',
        '_iblrig_taskSettings.raw',
        '_iblrig_Camera.raw',
        'camera.times',
        'wheel.position',
        'wheel.timestamps'
    ]
    dstypes_fpga = [
        '_spikeglx_sync.channels',
        '_spikeglx_sync.polarities',
        '_spikeglx_sync.times',
        'ephysData.raw.meta'
    ]
    """Recall that for the training rig there is only one side camera at 30 Hz and 1280 x 1024 px.
    For the recording rig there are two side cameras (left: 60 Hz, 1280 x 1024 px;
    right: 150 Hz, 640 x 512 px) and one body camera (30 Hz, 640 x 512 px)."""
    video_meta = {
        'training': {
            'left': {
                'fps': 30,
                'width': 1280,
                'height': 1024
            }
        },
        'ephys': {
            'left': {
                'fps': 60,
                'width': 1280,
                'height': 1024
            },
            'right': {
                'fps': 150,
                'width': 640,
                'height': 512
            },
            'body': {
                'fps': 30,
                'width': 640,
                'height': 512
            },
        }
    }
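
    # Usage sketch: the expected properties are looked up by type then camera label, e.g.
    # >>> CameraQC.video_meta['ephys']['right']['fps']  # -> 150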

    def __init__(self, session_path_or_eid, camera, **kwargs):
        """
        Compute and view camera QC metrics.

        :param session_path_or_eid: A session id or path
        :param camera: The camera to run QC on, one of 'left', 'right' or 'body'
        :param n_samples: The number of frames to sample for the position and brightness QC
        :param stream: If true and local video files are not available, the data are streamed
         from the remote source
        :param log: A logging.Logger instance, if None the 'ibllib' logger is used
        :param one: An ONE instance for fetching and setting the QC on Alyx
        """
        self.stream = kwargs.pop('stream', None)
        self.n_samples = kwargs.pop('n_samples', 100)
        self.sync_collection = kwargs.pop('sync_collection', None)
        self.sync = kwargs.pop('sync_type', None)
        self.protocol = kwargs.pop('protocol', None)
        super().__init__(session_path_or_eid, **kwargs)

        # Data
        self.label = assert_valid_label(camera)
        filename = f'_iblrig_{self.label}Camera.raw*.mp4'
        raw_video_path = self.session_path.joinpath('raw_video_data')
        self.video_path = next(raw_video_path.glob(filename), None)

        # If the local video doesn't exist, change the video path to a URL
        if not self.video_path and self.stream is not False and self.one is not None:
            try:
                self.stream = True
                self.video_path = self.one.path2url(raw_video_path / filename.replace('*', ''))
            except (StopIteration, ALFObjectNotFound):
                _log.error('No remote or local video file found')
                self.video_path = None

        logging.disable(logging.NOTSET)
        keys = ('count', 'pin_state', 'audio', 'fpga_times', 'wheel', 'video',
                'frame_samples', 'timestamps', 'camera_times', 'bonsai_times')
        self.data = Bunch.fromkeys(keys)
        self.frame_samples_idx = None

        # QC outcomes map
        self.metrics = None
        self.outcome = spec.QC.NOT_SET

        # Specify any checks to remove
        if self.protocol is not None and 'habituation' in self.protocol:
            self.checks_to_remove = ['check_wheel_alignment']
        else:
            self.checks_to_remove = []
        self._type = None

    @property
    def type(self):
        """
        Returns the camera type based on the protocol.

        :return: None, 'ephys' or 'training'
        """
        if not self._type:
            return
        else:
            return 'ephys' if 'ephys' in self._type else 'training'

    def load_data(self, extract_times: bool = False, load_video: bool = True) -> None:
        """Extract the data from raw data files.

        Extracts all the required task data from the raw data files. Missing data will raise an
        AssertionError. TODO This method could instead be moved to CameraExtractor classes.

        Data keys:
            - count (int array): the sequential frame number (n, n+1, n+2...)
            - pin_state (): the camera GPIO pin; records the audio TTLs; should be one per frame
            - audio (float array): timestamps of audio TTL fronts
            - fpga_times (float array): timestamps of camera TTLs recorded by FPGA
            - timestamps (float array): extracted video timestamps (the camera.times ALF)
            - bonsai_times (datetime array): system timestamps of video PC; should be one per frame
            - camera_times (float array): camera frame timestamps extracted from frame headers
            - wheel (Bunch): rotary encoder timestamps, position and period used for wheel motion
            - video (Bunch): video meta data, including dimensions and FPS
            - frame_samples (h x w x n array): array of evenly sampled frames (1 colour channel)

        :param extract_times: if True, the camera.times are re-extracted from the raw data
        :param load_video: if True, calls the load_video_data method
        """
        assert self.session_path, 'no session path set'
        _log.info('Gathering data for QC')

        # Get frame count and pin state
        self.data['count'], self.data['pin_state'] = \
            raw.load_embedded_frame_data(self.session_path, self.label, raw=True)

        # If there is an experiment description and there are video parameters
        sess_params = read_params(self.session_path) or {}
        task_collection = get_task_collection(sess_params)
        ns = get_sync_namespace(sess_params)
        self._set_sync(sess_params)
        self._update_meta_from_session_params(sess_params)

        # Load the audio and raw FPGA times
        sync = chmap = None
        if self.sync != 'bpod' and self.sync is not None:
            self.sync_collection = self.sync_collection or 'raw_ephys_data'
            ns = ns or 'spikeglx'
            if ns == 'spikeglx':
                sync, chmap = ephys_fpga.get_sync_and_chn_map(self.session_path, self.sync_collection)
            elif ns == 'timeline':
                sync, chmap = load_timeline_sync_and_chmap(self.session_path / self.sync_collection)
            else:
                raise NotImplementedError(f'Unknown namespace "{ns}"')
            audio_ttls = ephys_fpga.get_sync_fronts(sync, chmap['audio'])
            self.data['audio'] = audio_ttls['times']  # Get rises
            # Load raw FPGA times
            cam_ts = camio.extract_camera_sync(sync, chmap)
            self.data['fpga_times'] = cam_ts[self.label]
        else:
            self.sync_collection = self.sync_collection or task_collection
            bpod_data = raw.load_data(self.session_path, task_collection)
            _, audio_ttls = raw.load_bpod_fronts(
                self.session_path, data=bpod_data, task_collection=task_collection)
            self.data['audio'] = audio_ttls['times']

        # Load extracted frame times
        alf_path = self.session_path / 'alf'
        try:
            assert not extract_times
            self.data['timestamps'] = alfio.load_object(
                alf_path, f'{self.label}Camera', short_keys=True)['times']
        except AssertionError:  # Re-extract
            kwargs = dict(video_path=self.video_path, save=False)
            if self.sync == 'bpod':
                extractor = camio.CameraTimestampsBpod(self.session_path)
                kwargs['task_collection'] = task_collection
            else:
                kwargs.update(sync=sync, chmap=chmap)
                extractor = camio.CameraTimestampsFPGA(self.label, self.session_path)
            self.data['timestamps'] = extractor.extract(**kwargs)[0]
        except ALFObjectNotFound:
            _log.warning('no camera.times ALF found for session')

        # Get audio and wheel data
        wheel_keys = ('timestamps', 'position')
        try:
            # glob in case wheel data are in sub-collections
            alf_path = next(alf_path.rglob('*wheel.timestamps*')).parent
            self.data['wheel'] = alfio.load_object(alf_path, 'wheel', short_keys=True)
        except (StopIteration, ALFObjectNotFound):
            # Extract from raw data
            if self.sync != 'bpod' and self.sync is not None:
                if ns == 'spikeglx':
                    wheel_data = ephys_fpga.extract_wheel_sync(sync, chmap)
                elif ns == 'timeline':
                    extractor = mesoscope.TimelineTrials(self.session_path, sync_collection=self.sync_collection)
                    wheel_data = extractor.extract_wheel_sync()
                else:
                    raise NotImplementedError(f'Unknown namespace "{ns}"')
            else:
                if self.protocol is not None and 'habituation' in self.protocol:
                    wheel_data = training_wheel.get_wheel_position(
                        self.session_path, task_collection=task_collection)
                else:
                    wheel_data = [None, None]
            self.data['wheel'] = Bunch(zip(wheel_keys, wheel_data))

        # Find a short period of wheel motion for motion correlation.
        if data_for_keys(wheel_keys, self.data['wheel']) and self.data['timestamps'] is not None:
            self.data['wheel'].period = self.get_active_wheel_period(self.data['wheel'])

        # Load Bonsai frame timestamps
        try:
            ssv_times = raw.load_camera_ssv_times(self.session_path, self.label)
            self.data['bonsai_times'], self.data['camera_times'] = ssv_times
        except AssertionError:
            _log.warning('No Bonsai video timestamps file found')

        # Gather information from the video file
        if load_video:
            _log.info('Inspecting video file...')
            self.load_video_data()

    def load_video_data(self):
        """Get basic properties of the video.

        Updates the `data` property with video metadata such as length and frame count, as well as
        loading some frames to perform QC on.
        """
        try:
            self.data['video'] = get_video_meta(self.video_path, one=self.one)
            # Sample some frames from the video file
            indices = np.linspace(100, self.data['video'].length - 100, self.n_samples).astype(int)
            self.frame_samples_idx = indices
            self.data['frame_samples'] = get_video_frames_preload(self.video_path, indices,
                                                                  mask=np.s_[:, :, 0])
        except AssertionError:
            _log.error('Failed to read video file; setting outcome to CRITICAL')
            self._outcome = spec.QC.CRITICAL
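
    # Worked sketch of the sampling above: with the default n_samples=100 and a
    # hypothetical 60,000-frame video, the indices are ~evenly spaced over [100, 59900]
    # and frame_samples has shape (100, h, w), the mask keeping a single colour channel.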

    @staticmethod
    def get_active_wheel_period(wheel, duration_range=(3., 20.), display=False):
        """
        Find a period of active wheel movement.

        Attempts to find a period of movement where the wheel accelerates and decelerates for the
        wheel motion alignment QC.

        :param wheel: A Bunch of wheel timestamps and position data
        :param duration_range: The candidates must be within the min/max duration range
        :param display: If true, plot the selected wheel movement
        :return: 2-element array comprising the start and end times of the active period
        """
        pos, ts = wh.interpolate_position(wheel.timestamps, wheel.position)
        v, acc = wh.velocity_filtered(pos, 1000)
        on, off, *_ = wh.movements(ts, acc, pos_thresh=.1, make_plots=False)
        edges = np.c_[on, off]
        indices, _ = np.where(np.logical_and(
            np.diff(edges) > duration_range[0], np.diff(edges) < duration_range[1]))
        if len(indices) == 0:
            _log.warning('No period of wheel movement found for motion alignment.')
            return None
        # Pick a movement somewhere in the middle
        i = indices[int(indices.size / 2)]
        if display:
            _, (ax0, ax1) = plt.subplots(2, 1, sharex='all')
            mask = np.logical_and(ts > edges[i][0], ts < edges[i][1])
            ax0.plot(ts[mask], pos[mask])
            ax1.plot(ts[mask], acc[mask])
        return edges[i]
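
    # Usage sketch (the wheel Bunch is assumed to hold rotary encoder 'timestamps' and
    # 'position' arrays, e.g. loaded via ONE):
    # >>> period = CameraQC.get_active_wheel_period(wheel)
    # >>> period  # e.g. array([725.3, 731.0]): start/end of a 3-20 s movement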

    def _set_sync(self, session_params=False):
        """Set the sync and sync_collection attributes if not already set.

        Also sets the type attribute based on the sync. NB: The type attribute is for legacy
        sessions.

        Parameters
        ----------
        session_params : dict, bool
            The loaded experiment description file. If False, attempts to load it from the
            session_path.
        """
        if session_params is False:
            if not self.session_path:
                raise ValueError('No session path set')
            session_params = read_params(self.session_path)
        sync, sync_dict = get_sync(session_params)
        self.sync = self.sync or sync
        self.sync_collection = self.sync_collection or sync_dict.get('collection')
        if self.sync:
            self._type = 'ephys' if self.sync == 'nidq' else 'training'

    def _update_meta_from_session_params(self, sess_params):
        """
        Update the default expected video properties.

        Use properties defined in the experiment description file, if present. This updates the
        `video_meta` property with the fps, width and height for the type and camera label.

        Parameters
        ----------
        sess_params : dict
            The loaded experiment description file.
        """
        try:
            assert sess_params
            video_pars = sess_params.get('devices', {}).get('cameras', {}).get(self.label)
        except (AssertionError, KeyError):
            return
        PROPERTIES = ('width', 'height', 'fps')
        video_meta = copy.deepcopy(self.video_meta)  # must re-assign as it's a class attribute
        if self.type not in video_meta:
            video_meta[self.type] = {}
        if self.label not in video_meta[self.type]:
            video_meta[self.type][self.label] = {}
        video_meta[self.type][self.label].update(
            **{k: v for k, v in video_pars.items() if k in PROPERTIES}
        )
        self.video_meta = video_meta
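
    # Sketch of the experiment description structure this method expects (values are
    # illustrative only):
    # >>> sess_params = {'devices': {'cameras': {'left': {'fps': 60, 'width': 1280, 'height': 1024}}}}
    # >>> qc._update_meta_from_session_params(sess_params)  # updates video_meta[qc.type]['left']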

    def run(self, update: bool = False, **kwargs) -> (str, dict):
        """
        Run video QC checks and return the outcome.

        :param update: if True, updates the session QC fields on Alyx
        :param download_data: if True, downloads any missing data if required
        :param extract_times: if True, re-extracts the camera timestamps from the raw data
        :returns: overall outcome as a spec.QC enum, and a dict of checks and their outcomes
        """
        _log.info(f'Computing QC outcome for {self.label} camera, session {self.eid}')
        namespace = f'video{self.label.capitalize()}'
        if all(x is None for x in self.data.values()):
            self.load_data(**kwargs)
        if self.data['frame_samples'] is None or self.data['timestamps'] is None:
            return spec.QC.NOT_SET, {}
        if self.data['timestamps'].shape[0] == 0:
            _log.error(f'No timestamps for {self.label} camera; setting outcome to CRITICAL')
            return spec.QC.CRITICAL, {}

        def is_metric(x):
            return isfunction(x) and x.__name__.startswith('check_')

        checks = getmembers(self.__class__, is_metric)
        checks = self._remove_check(checks)
        self.metrics = {f'_{namespace}_' + k[6:]: fn(self) for k, fn in checks}

        values = [x if isinstance(x, spec.QC) else x[0] for x in self.metrics.values()]
        outcome = max(map(spec.QC.validate, values))

        if update:
            extended = dict()
            for k, v in self.metrics.items():
                if v is None:
                    extended[k] = spec.QC.NOT_SET.name
                elif isinstance(v, tuple):
                    extended[k] = tuple(i.name if isinstance(i, spec.QC) else i for i in v)
                else:
                    extended[k] = v.name

            self.update_extended_qc(extended)
            self.update(outcome, namespace)
        return outcome, self.metrics
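
    # Minimal usage sketch (eid is assumed to be a valid session UUID):
    # >>> outcome, metrics = CameraQC(eid, 'left').run(update=False)
    # >>> metrics['_videoLeft_framerate']  # e.g. a (QC.PASS, 59.998) tuple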

    def _remove_check(self, checks):
        """
        Remove one or more check functions from the QC checklist.

        Parameters
        ----------
        checks : list of tuple
            The complete list of check function names and handles.

        Returns
        -------
        list of tuple
            The list of check function names and handles, sans names in the `checks_to_remove`
            property.
        """
        if len(self.checks_to_remove) == 0:
            return checks
        else:
            for check in self.checks_to_remove:
                check_names = [ch[0] for ch in checks]
                idx = check_names.index(check)
                checks.pop(idx)
            return checks

    def check_brightness(self, bounds=(40, 200), max_std=20, roi=True, display=False):
        """Check that the video brightness is within a given range.

        The mean brightness of each frame must be within the bounds provided, and the standard
        deviation across sample frames should be less than the given value. Assumes that the
        frame samples are 2D (no colour channels).

        :param bounds: For each frame, check that: bounds[0] < M < bounds[1],
         where M = mean(frame). If fewer than 75% of the sample frames are within these bounds,
         the outcome is WARNING. If fewer than 75% are within twice the bounds, the outcome is
         FAIL.
        :param max_std: The standard deviation of the frame luminance means must be less than this
        :param roi: If True, check brightness on an ROI of the frame
        :param display: When True the mean frame luminance is plotted against sample frames.
         The sample frames with the lowest and highest mean luminance are shown.
        """
        if self.data['frame_samples'] is None:
            return spec.QC.NOT_SET
        if roi is True:
            _, h, w = self.data['frame_samples'].shape
            if self.label == 'body':  # Latter half
                roi = (slice(None), slice(None), slice(int(w / 2), None, None))
            elif self.label == 'left':  # Top left quadrant (~2/3 width, 1/2 height)
                roi = (slice(None), slice(None, int(h / 2), None), slice(None, int(w * .66), None))
            else:  # Top right quadrant (~2/3 width, 1/2 height)
                roi = (slice(None), slice(None, int(h / 2), None), slice(int(w * .66), None, None))
        else:
            roi = (slice(None), slice(None), slice(None))
        brightness = self.data['frame_samples'][roi].mean(axis=(1, 2))
        # dims = self.data['frame_samples'].shape
        # brightness /= np.array((*dims[1:], 255)).prod()  # Normalize

        if display:
            f = plt.figure()
            gs = f.add_gridspec(2, 3)
            indices = self.frame_samples_idx
            # Plot mean frame luminance
            ax = f.add_subplot(gs[:2, :2])
            plt.plot(indices, brightness, label='brightness')
            ax.set(
                xlabel='frame #',
                ylabel='brightness (mean pixel)',
                title='Brightness')
            ax.hlines(bounds, 0, indices[-1],
                      colors='tab:orange', linestyles=':', label='warning bounds')
            ax.hlines((bounds[0] / 2, bounds[1] * 2), 0, indices[-1],
                      colors='r', linestyles=':', label='failure bounds')
            ax.legend()
            # Plot min-max frames
            for i, idx in enumerate((np.argmax(brightness), np.argmin(brightness))):
                a = f.add_subplot(gs[i, 2])
                ax.annotate('*', (indices[idx], brightness[idx]),  # this is the point to label
                            textcoords='offset points', xytext=(0, 1), ha='center')
                frame = self.data['frame_samples'][idx][roi[1:]]
                title = ('min' if i else 'max') + ' mean luminance = %.2f' % brightness[idx]
                self.imshow(frame, ax=a, title=title)

        PCT_PASS = .75  # Proportion of sample frames that must pass
        # Warning if brightness not within range (3/4 of frames must be between bounds)
        warn_range = np.logical_and(brightness > bounds[0], brightness < bounds[1])
        warn_range = 'PASS' if sum(warn_range) / self.n_samples > PCT_PASS else 'WARNING'
        # Fail if brightness not within twice the range or the std exceeds the threshold
        fail_range = np.logical_and(brightness > bounds[0] / 2, brightness < bounds[1] * 2)
        within_range = sum(fail_range) / self.n_samples > PCT_PASS
        fail_range = 'PASS' if within_range and np.std(brightness) < max_std else 'FAIL'
        return self.overall_outcome([warn_range, fail_range])
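
    # Worked sketch of the outcome logic above: sample frames averaging ~35 fall outside
    # the default (40, 200) warning bounds but inside the (20, 400) failure bounds, so
    # warn_range is 'WARNING' while fail_range is 'PASS' (std permitting), giving an
    # overall WARNING.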

    def check_file_headers(self):
        """Check that the frame rate reported in the file header matches the expected frame rate."""
        if None in (self.data['video'], self.video_meta):
            return spec.QC.NOT_SET
        expected = self.video_meta[self.type][self.label]
        return spec.QC.PASS if self.data['video']['fps'] == expected['fps'] else spec.QC.FAIL

    def check_framerate(self, threshold=1.):
        """Check that the camera times match the specified frame rate for the camera.

        :param threshold: The maximum absolute difference between the timestamp sample rate and
         the video frame rate. NB: Does not take into account dropped frames.
        """
        if any(x is None for x in (self.data['timestamps'], self.video_meta)):
            return spec.QC.NOT_SET
        fps = self.video_meta[self.type][self.label]['fps']
        Fs = 1 / np.median(np.diff(self.data['timestamps']))  # Approx. frequency of camera
        return spec.QC.PASS if abs(Fs - fps) < threshold else spec.QC.FAIL, float(round(Fs, 3))
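
    # Worked sketch: a median timestamp spacing of ~16.67 ms gives Fs ≈ 60 Hz; with an
    # expected fps of 60 and the default threshold of 1, |Fs - fps| < 1 so the check
    # PASSes, returning the measured rate (rounded to 3 d.p.) alongside the outcome.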

    def check_pin_state(self, display=False):
        """Check that the pin state reflects Bpod TTLs."""
        if not data_for_keys(('video', 'pin_state', 'audio'), self.data):
            return spec.QC.NOT_SET
        size_diff = int(self.data['pin_state'].shape[0] - self.data['video']['length'])
        # NB: The pin state can be high for 2 consecutive frames
        low2high = np.insert(np.diff(self.data['pin_state'][:, -1].astype(int)) == 1, 0, False)
        # NB: Time between two consecutive TTLs can be sub-frame, so this will fail
        ndiff_low2high = int(self.data['audio'][::2].size - sum(low2high))
        # state_ttl_matches = ndiff_low2high == 0
        # Check within ms of audio times
        if display:
            plt.Figure()
            plt.plot(self.data['timestamps'][low2high], np.zeros(sum(low2high)), 'o',
                     label='GPIO Low -> High')
            plt.plot(self.data['audio'], np.zeros(self.data['audio'].size), 'rx',
                     label='Audio TTL High')
            plt.xlabel('FPGA frame times / s')
            plt.gca().set(yticklabels=[])
            plt.gca().tick_params(left=False)
            plt.legend()

        outcome = self.overall_outcome(
            ('PASS' if size_diff == 0 else 'WARNING' if np.abs(size_diff) < 5 else 'FAIL',
             'PASS' if np.abs(ndiff_low2high) < 5 else 'WARNING')
        )
        return outcome, ndiff_low2high, size_diff

    def check_dropped_frames(self, threshold=.1):
        """Check how many frames were reported missing.

        :param threshold: The maximum allowable percentage of dropped frames
        """
        if not data_for_keys(('video', 'count'), self.data):
            return spec.QC.NOT_SET
        size_diff = int(self.data['count'].size - self.data['video']['length'])
        strict_increase = np.diff(self.data['count']) > 0  # element-wise, so violations can be counted
        if not np.all(strict_increase):
            n_affected = np.sum(np.invert(strict_increase))
            _log.info(f'frame count not strictly increasing: '
                      f'{n_affected} frames affected ({n_affected / strict_increase.size:.2%})')
            return spec.QC.CRITICAL
        dropped = np.diff(self.data['count']).astype(int) - 1
        pct_dropped = (sum(dropped) / len(dropped) * 100)
        # Calculate the overall outcome for this check
        outcome = self.overall_outcome(
            ('PASS' if size_diff == 0 else 'WARNING' if np.abs(size_diff) < 5 else 'FAIL',
             'PASS' if pct_dropped < threshold else 'FAIL')
        )
        return outcome, int(sum(dropped)), size_diff
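
    # Worked sketch of the arithmetic above (hypothetical counter): a frame count of
    # [0, 1, 3, 4] gives np.diff(...) - 1 == [0, 1, 0], i.e. 1 dropped frame over 3
    # intervals (~33%), which with the default 0.1% threshold would FAIL.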

    def check_timestamps(self):
        """Check that the camera.times array is reasonable."""
        if not data_for_keys(('timestamps', 'video'), self.data):
            return spec.QC.NOT_SET
        # Check number of timestamps matches video
        length_matches = self.data['timestamps'].size == self.data['video'].length
        # Check times are strictly increasing
        increasing = all(np.diff(self.data['timestamps']) > 0)
        # Check times do not contain nans
        nanless = not np.isnan(self.data['timestamps']).any()
        return spec.QC.PASS if increasing and length_matches and nanless else spec.QC.FAIL

    def check_camera_times(self):
        """Check that the number of raw camera timestamps matches the number of video frames."""
        if not data_for_keys(('bonsai_times', 'video'), self.data):
            return spec.QC.NOT_SET
        length_match = len(self.data['camera_times']) == self.data['video'].length
        outcome = spec.QC.PASS if length_match else spec.QC.WARNING
        # 1 / np.median(np.diff(self.data.camera_times))
        return outcome, len(self.data['camera_times']) - self.data['video'].length

    def check_resolution(self):
        """Check that the timestamps and video file resolution match what we expect."""
        if self.data['video'] is None:
            return spec.QC.NOT_SET
        actual = self.data['video']
        expected = self.video_meta[self.type][self.label]
        match = actual['width'] == expected['width'] and actual['height'] == expected['height']
        return spec.QC.PASS if match else spec.QC.FAIL

    def check_wheel_alignment(self, tolerance=(1, 2), display=False):
        """Check that wheel motion in the video correlates with the rotary encoder signal.

        The check is skipped for body camera videos as the wheel is often obstructed.

        Parameters
        ----------
        tolerance : int, (int, int)
            Maximum absolute offset in frames. If two values, the maximum value is taken as the
            warning threshold.
        display : bool
            If true, the wheel motion energy is plotted against the rotary encoder.

        Returns
        -------
        one.alf.spec.QC
            The outcome, one of {'NOT_SET', 'FAIL', 'WARNING', 'PASS'}.
        int
            Frame offset, i.e. by how many frames the video was shifted to match the rotary
            encoder signal. Negative values mean the video was shifted backwards with respect to
            the wheel timestamps.

        Notes
        -----
        - A negative frame offset typically means that there were frame TTLs at the beginning that
          do not correspond to any video frames (sometimes the first few frames aren't saved to
          disk). Since 2021-09-15 the extractor should compensate for this.
        """
        wheel_present = data_for_keys(('position', 'timestamps', 'period'), self.data['wheel'])
        if not wheel_present or self.label == 'body':
            return spec.QC.NOT_SET

        # Check the selected wheel movement period occurred within camera timestamp time
        camera_times = self.data['timestamps']
        in_range = within_ranges(camera_times, self.data['wheel']['period'].reshape(-1, 2))
        if not in_range.any():
            # Check if any camera timestamps overlap with the wheel times
            if np.any(np.logical_and(
                    camera_times > self.data['wheel']['timestamps'][0],
                    camera_times < self.data['wheel']['timestamps'][-1])
            ):
                _log.warning('Unable to check wheel alignment: '
                             'chosen movement is not during video')
                return spec.QC.NOT_SET
            else:
                # No overlap, return fail
                return spec.QC.FAIL
        aln = MotionAlignment(self.eid, self.one, self.log, session_path=self.session_path)
        aln.data = self.data.copy()
        aln.data['camera_times'] = {self.label: camera_times}
        aln.video_paths = {self.label: self.video_path}
        offset, *_ = aln.align_motion(period=self.data['wheel'].period,
                                      display=display, side=self.label)
        if offset is None:
            return spec.QC.NOT_SET
        if display:
            aln.plot_alignment()

        # Determine the outcome. If there are two values for the tolerance, one is taken to be
        # a warning threshold, the other a failure threshold.
        out_map = {0: spec.QC.WARNING, 1: spec.QC.WARNING, 2: spec.QC.PASS}  # 0: FAIL -> WARNING Aug 2022
        passed = np.abs(offset) <= np.sort(np.array(tolerance))
        return out_map[sum(passed)], int(offset)
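
    # Sketch of the tolerance mapping above: with tolerance=(1, 2), an offset of 2 frames
    # clears only the larger threshold (sum(passed) == 1) and returns WARNING, whereas an
    # offset of 0 or 1 clears both and PASSes; larger offsets also map to WARNING
    # (a FAIL outcome was relaxed to WARNING in Aug 2022).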

    def check_position(self, hist_thresh=(75, 80), pos_thresh=(10, 15),
                       metric=cv2.TM_CCOEFF_NORMED,
                       display=False, test=False, roi=None, pct_thresh=True):
        """Check that the camera is positioned correctly.

        For the template matching, zero-normalized cross-correlation (the default) should be more
        robust to exposure (which we're not checking here). The L2 norm (TM_SQDIFF) should
        also work.

        If display is True, the template ROI (pink hashed) is plotted over a video frame,
        along with the threshold regions (green solid). The histogram correlations are plotted
        and the full histogram is plotted for one of the sample frames and the reference frame.

        :param hist_thresh: The minimum histogram cross-correlation threshold to pass (0-1).
        :param pos_thresh: The maximum number of pixels that the template matcher may be off
         by. If two values are provided, the lower threshold is treated as a warning boundary.
        :param metric: The metric to use for template matching.
        :param display: If true, the results are plotted
        :param test: If true, the reference frames are used instead of the frames in
         frame_samples.
        :param roi: A tuple of indices for the face template in the form ((y1, y2), (x1, x2))
        :param pct_thresh: If true, the thresholds are treated as percentages
        """
        if not test and self.data['frame_samples'] is None:
            return spec.QC.NOT_SET
        refs = self.load_reference_frames(self.label)
        # ensure thresholds are sorted arrays
        pos_thresh = np.sort(np.array(pos_thresh))
        hist_thresh = np.sort(np.array(hist_thresh))

        # Method 1: compareHist
        #### Mean hist comparison
        # ref_h = [cv2.calcHist([x], [0], None, [256], [0, 256]) for x in refs]
        # ref_h = np.array(ref_h).mean(axis=0)
        # frames = refs if test else self.data['frame_samples']
        # hists = [cv2.calcHist([x], [0], None, [256], [0, 256]) for x in frames]
        # test_h = np.array(hists).mean(axis=0)
        # corr = cv2.compareHist(test_h, ref_h, cv2.HISTCMP_CORREL)
        # if pct_thresh:
        #     corr *= 100
        # hist_passed = corr > hist_thresh
        ####
        ref_h = cv2.calcHist([refs[0]], [0], None, [256], [0, 256])
        frames = refs if test else self.data['frame_samples']
        hists = [cv2.calcHist([x], [0], None, [256], [0, 256]) for x in frames]
        corr = np.array([cv2.compareHist(test_h, ref_h, cv2.HISTCMP_CORREL) for test_h in hists])
        if pct_thresh:
            corr *= 100
        hist_passed = [np.all(corr > x) for x in hist_thresh]

        # Method 2: template matching of the face ROI
        top_left, roi, template = self.find_face(roi=roi, test=test, metric=metric, refs=refs)
        (y1, y2), (x1, x2) = roi
        err = (x1, y1) - np.median(np.array(top_left), axis=0)
        h, w = frames[0].shape[:2]

        if pct_thresh:  # Threshold as percent
            # t_x, t_y = pct_thresh
            err_pct = [(abs(x) / y) * 100 for x, y in zip(err, (h, w))]
            face_passed = [all(err_pct < x) for x in pos_thresh]
        else:
            face_passed = [np.all(np.abs(err) < x) for x in pos_thresh]

        if display:
            plt.figure()
            # Plot frame with template overlay
            img = frames[0]
            ax0 = plt.subplot(221)
            ax0.imshow(img, cmap='gray', vmin=0, vmax=255)
            bounds = (x1 - err[0], x2 - err[0], y2 - err[1], y1 - err[1])
            ax0.imshow(template, cmap='gray', alpha=0.5, extent=bounds)
            if pct_thresh:
                for c, thresh in zip(('green', 'yellow'), pos_thresh):
                    t_y = (h / 100) * thresh
                    t_x = (w / 100) * thresh
                    xy = (x1 - t_x, y1 - t_y)
                    ax0.add_patch(Rectangle(xy, x2 - x1 + (t_x * 2), y2 - y1 + (t_y * 2),
                                            fill=True, facecolor=c, lw=0, alpha=0.05))
            else:
                for c, thresh in zip(('green', 'yellow'), pos_thresh):
                    xy = (x1 - thresh, y1 - thresh)
                    ax0.add_patch(Rectangle(xy, x2 - x1 + (thresh * 2), y2 - y1 + (thresh * 2),
                                            fill=True, facecolor=c, lw=0, alpha=0.05))
            xy = (x1 - err[0], y1 - err[1])
            ax0.add_patch(Rectangle(xy, x2 - x1, y2 - y1,
                                    edgecolor='pink', fill=False, hatch='//', lw=1))
            ax0.set(xlim=(0, img.shape[1]), ylim=(img.shape[0], 0))
            ax0.set_axis_off()
            # Plot the image histograms
            ax1 = plt.subplot(212)
            ax1.plot(ref_h[5:-1], label='reference frame')
            ax1.plot(np.array(hists).mean(axis=0)[5:-1], label='mean frame')
            ax1.set_xlim([0, 256])
            plt.legend()
            # Plot the correlations for each sample frame
            ax2 = plt.subplot(222)
            ax2.plot(corr, label='hist correlation')
            ax2.axhline(hist_thresh[0], 0, self.n_samples,
                        linestyle=':', color='r', label='fail threshold')
            ax2.axhline(hist_thresh[1], 0, self.n_samples,
                        linestyle=':', color='g', label='pass threshold')
            ax2.set(xlabel='Sample Frame #', ylabel='Hist correlation')
            plt.legend()
            plt.suptitle('Check position')
            plt.show()

        pass_map = {i: s for i, s in enumerate(('FAIL', 'WARNING', 'PASS'))}
        face_aligned = pass_map[sum(face_passed)]
        hist_correlates = pass_map[sum(hist_passed)]

        return self.overall_outcome([face_aligned, hist_correlates], agg=min)
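
    # Sketch of the aggregation above: each criterion is scored against both thresholds, so
    # a sum of 2, 1 or 0 maps to PASS, WARNING or FAIL; agg=min then keeps the better of
    # the face-position and histogram outcomes, so the check only fails when both do.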

    def check_focus(self, n=20, threshold=(100, 6),
                    roi=False, display=False, test=False, equalize=True):
        """Check that the video is in focus.

        Two methods are used here: looking at the high frequencies with a DFT, and
        applying a Laplacian HPF and looking at the variance.

        Note:
            - Both methods are sensitive to noise (the Laplacian is a 2nd order filter).
            - The thresholds for the FFT may need to be different for the left/right vs body as
              the distribution of frequencies in the image is different (e.g. the holder
              comprises mostly very high frequencies).
            - The image may be overall in focus but the places we care about can still be out of
              focus (namely the face). For this we'll take an ROI around the face.
            - The focus check is thrown off by brightness. This may be fixed by equalizing the
              histogram (set equalize=True).

        Parameters
        ----------
        n : int
            Number of frames from the frame_samples data to use in the check.
        threshold : tuple of float
            The lower boundary for Laplacian variance and mean FFT filtered brightness,
            respectively.
        roi : bool, None, list of slice
            If False, the roi is determined via template matching for the face or body.
            If None, some set ROIs for face and paws are used. A list of slices may also be
            passed.
        display : bool
            If true, the results are displayed.
        test : bool
            If true, a set of artificially blurred reference frames are used as the input.
            This can be used to select reasonable thresholds.
        equalize : bool
            If true, the histograms of the frames are equalized, resulting in increased global
            contrast and a linear CDF. This makes the check robust to low light conditions.

        Returns
        -------
        one.alf.spec.QC
            The QC outcome, either FAIL or PASS.
        """
        no_frames = self.data['frame_samples'] is None or len(self.data['frame_samples']) == 0
        if not test and no_frames:
            return spec.QC.NOT_SET

        if roi is False:
            top_left, roi, _ = self.find_face(test=test)  # (y1, y2), (x1, x2)
            h, w = map(lambda x: np.diff(x).item(), roi)
            x, y = np.median(np.array(top_left), axis=0).round().astype(int)
            roi = (np.s_[y: y + h, x: x + w],)
        else:
            ROI = {
                'left': (np.s_[:400, :561], np.s_[500:, 100:800]),  # (face, wheel)
                'right': (np.s_[:196, 397:], np.s_[221:, 255:]),
                'body': (np.s_[143:274, 84:433],)  # body holder
            }
            roi = roi or ROI[self.label]

        if test:
            """In test mode load a reference frame and run it through a normalized box filter
            with increasing kernel size.
            """
            idx = (0,)
            ref = self.load_reference_frames(self.label)[idx]
            kernel_sz = np.unique(np.linspace(0, 15, n, dtype=int))
            n = kernel_sz.size  # Size excluding repeated kernels
            img = np.empty((n, *ref.shape), dtype=np.uint8)
            for i, k in enumerate(kernel_sz):
                img[i] = ref.copy() if k == 0 else cv2.blur(ref, (k, k))
            if equalize:
                [cv2.equalizeHist(x, x) for x in img]
            if display:
                # Plot blurred images
                f, axes = plt.subplots(1, len(kernel_sz))
                for ax, ig, k in zip(axes, img, kernel_sz):
                    self.imshow(ig, ax=ax, title='Kernel ({0}, {0})'.format(k or 'None'))
                f.suptitle('Reference frame with box filter')
        else:
            # Sub-sample the frame samples
            idx = np.unique(np.linspace(0, len(self.data['frame_samples']) - 1, n, dtype=int))
            img = self.data['frame_samples'][idx]
            if equalize:
                [cv2.equalizeHist(x, x) for x in img]

        # A measure of the sharpness effectively taking the second derivative of the image
        lpc_var = np.empty((min(n, len(img)), len(roi)))
        for i, frame in enumerate(img[::-1]):
            lpc = cv2.Laplacian(frame, cv2.CV_16S, ksize=1)
            lpc_var[i] = [lpc[mask].var() for mask in roi]

        if display:
            # Plot the first sample image
            f = plt.figure()
            gs = f.add_gridspec(len(roi) + 1, 3)
            f.add_subplot(gs[0:len(roi), 0])
            frame = img[0]
            self.imshow(frame, title=f'Frame #{self.frame_samples_idx[idx[0]]}')
            # Plot the ROIs with and without filter
            lpc = cv2.Laplacian(frame, cv2.CV_16S, ksize=1)
            abs_lpc = cv2.convertScaleAbs(lpc)
            for i, r in enumerate(roi):
                f.add_subplot(gs[i, 1])
                self.imshow(frame[r], title=f'ROI #{i + 1}')
                f.add_subplot(gs[i, 2])
                self.imshow(abs_lpc[r], title=f'ROI #{i + 1} - Laplacian filter')
            f.suptitle('Laplacian blur detection')
            # Plot variance over frames
            ax = f.add_subplot(gs[len(roi), :])
            ln = plt.plot(lpc_var)
            [l.set_label(f'ROI #{i + 1}') for i, l in enumerate(ln)]
            ax.axhline(threshold[0], 0, n, linestyle=':', color='r', label='lower threshold')
            ax.set(xlabel='Frame sample', ylabel='Variance of the Laplacian')
            plt.tight_layout()
            plt.legend()

        # The second test is to high-pass with a DFT
        h, w = img.shape[1:]
        cX, cY = w // 2, h // 2
        sz = 60  # Seems to be the magic number for high pass
        mask = np.ones((h, w, 2), bool)
        mask[cY - sz:cY + sz, cX - sz:cX + sz] = False
        filt_mean = np.empty(len(img))
        for i, frame in enumerate(img[::-1]):
            dft = cv2.dft(np.float32(frame), flags=cv2.DFT_COMPLEX_OUTPUT)
            f_shift = np.fft.fftshift(dft) * mask  # Shift & remove low frequencies
            f_ishift = np.fft.ifftshift(f_shift)  # Shift back
            filt_frame = cv2.idft(f_ishift)  # Reconstruct
            filt_frame = cv2.magnitude(filt_frame[..., 0], filt_frame[..., 1])
            # Re-normalize to 8-bits to make the threshold simpler
            img_back = cv2.normalize(filt_frame, None, alpha=0, beta=256,
                                     norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
            filt_mean[i] = np.mean(img_back)
            if i == len(img) - 1 and display:
                # Plot Fourier transforms
                f = plt.figure()
                gs = f.add_gridspec(2, 3)
                self.imshow(img[0], ax=f.add_subplot(gs[0, 0]), title='Original frame')
                dft_shift = np.fft.fftshift(dft)
                magnitude = 20 * np.log(cv2.magnitude(dft_shift[..., 0], dft_shift[..., 1]))
                self.imshow(magnitude, ax=f.add_subplot(gs[0, 1]), title='Magnitude spectrum')
                self.imshow(img_back, ax=f.add_subplot(gs[0, 2]), title='Filtered frame')
                ax = f.add_subplot(gs[1, :])
                ax.plot(filt_mean)
                ax.axhline(threshold[1], 0, n, linestyle=':', color='r', label='lower threshold')
                ax.set(xlabel='Frame sample', ylabel='Mean of filtered frame')
                f.suptitle('Discrete Fourier Transform')
                plt.show()
        passes = np.all(lpc_var > threshold[0]) or np.all(filt_mean > threshold[1])
        return spec.QC.PASS if passes else spec.QC.FAIL
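
    # Standalone sketch of the Laplacian sharpness measure used above (frame is assumed
    # to be a greyscale uint8 array):
    # >>> lpc = cv2.Laplacian(frame, cv2.CV_16S, ksize=1)
    # >>> lpc.var()  # low variance after the 2nd-derivative filter suggests blur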

    def find_face(self, roi=None, test=False, metric=cv2.TM_CCOEFF_NORMED, refs=None):
        """Use template matching to find the face location in the frame.

        For the template matching, zero-normalized cross-correlation (the default) should be more
        robust to exposure (which we're not checking here). The L2 norm (TM_SQDIFF) should
        also work. That said, normalizing the histograms works best.

        :param roi: A tuple of indices for the face template in the form ((y1, y2), (x1, x2))
        :param test: If True the template is matched against frames that come from the same session
        :param metric: The metric to use for template matching
        :param refs: An array of frames to match the template to

        :returns: list of (x, y) top-left match locations, the ((y1, y2), (x1, x2)) ROI, and the
         template
        """
        ROI = {
            'left': ((45, 346), (138, 501)),
            'right': ((14, 174), (430, 618)),
            'body': ((141, 272), (90, 339))
        }
        roi = roi or ROI[self.label]
        refs = self.load_reference_frames(self.label) if refs is None else refs

        frames = refs if test else self.data['frame_samples']
        template = refs[0][tuple(slice(*r) for r in roi)]
        top_left = []  # [(x1, y1), ...]
        for frame in frames:
            res = cv2.matchTemplate(frame, template, metric)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            # If the metric is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum
            top_left.append(min_loc if metric < 2 else max_loc)
        # bottom_right = (top_left[0] + w, top_left[1] + h)
        return top_left, roi, template

    @staticmethod
    def load_reference_frames(side):
        """Load some reference frames for a given video.

        The reference frames are from sessions where the camera was well positioned. The
        frames are in qc/reference, one file per camera, only one channel per frame. The
        session eids can be found in qc/reference/frame_src.json.

        :param side: Video label, e.g. 'left'
        :return: numpy array of frames with the shape (n, h, w)
        """
        file = next(Path(__file__).parent.joinpath('reference').glob(f'frames_{side}.npy'))
        refs = np.load(file)
        return refs

    @staticmethod
    def imshow(frame, ax=None, title=None, **kwargs):
        """plt.imshow with some convenient defaults for greyscale frames."""
        h = ax or plt.gca()
        defaults = {
            'cmap': kwargs.pop('cmap', 'gray'),
            'vmin': kwargs.pop('vmin', 0),
            'vmax': kwargs.pop('vmax', 255)
        }
        h.imshow(frame, **defaults, **kwargs)
        h.set(title=title)
        h.set_axis_off()
        return ax


class CameraQCCamlog(CameraQC):
    """
    A class for computing camera QC metrics from camlog data.

    For this QC we expect the check_pin_state to be NOT_SET as we are not using the GPIO for
    timestamp alignment.
    """

    dstypes = [
        '_iblrig_taskData.raw',
        '_iblrig_taskSettings.raw',
        '_iblrig_Camera.raw',
        'camera.times',
        'wheel.position',
        'wheel.timestamps'
    ]
    dstypes_fpga = [
        '_spikeglx_sync.channels',
        '_spikeglx_sync.polarities',
        '_spikeglx_sync.times',
        'DAQData.raw.meta',
        'DAQData.wiring'
    ]

    def __init__(self, session_path_or_eid, camera, sync_collection='raw_sync_data', sync_type='nidq', **kwargs):
        """Compute camera QC metrics from camlog data.

        For this QC we expect the check_pin_state to be NOT_SET as we are not using the GPIO for
        timestamp alignment.
        """
        super().__init__(session_path_or_eid, camera, sync_collection=sync_collection, sync_type=sync_type, **kwargs)
        self._type = 'ephys'
        self.checks_to_remove = ['check_pin_state']

    def load_data(self, extract_times: bool = False, load_video: bool = True, **kwargs) -> None:
        """Extract the data from raw data files.

        Extracts all the required task data from the raw data files. Missing data will raise an
        AssertionError.

        Data keys:
            - count (int array): the sequential frame number (n, n+1, n+2...)
            - pin_state (): the camera GPIO pin; records the audio TTLs; should be one per frame
            - audio (float array): timestamps of audio TTL fronts
            - fpga_times (float array): timestamps of camera TTLs recorded by FPGA
            - timestamps (float array): extracted video timestamps (the camera.times ALF)
            - bonsai_times (datetime array): system timestamps of video PC; should be one per frame
            - camera_times (float array): camera frame timestamps extracted from frame headers
            - wheel (Bunch): rotary encoder timestamps, position and period used for wheel motion
            - video (Bunch): video meta data, including dimensions and FPS
            - frame_samples (h x w x n array): array of evenly sampled frames (1 colour channel)

        :param extract_times: if True, the camera.times are re-extracted from the raw data
        :param load_video: if True, calls the load_video_data method
        """
        assert self.session_path, 'no session path set'
        _log.info('Gathering data for QC')

        # If there is an experiment description and there are video parameters
        sess_params = read_params(self.session_path) or {}
        video_collection = get_video_collection(sess_params, self.label)
        task_collection = get_task_collection(sess_params)
        self._set_sync(sess_params)
        self._update_meta_from_session_params(sess_params)

        # Get frame count
        log, _ = parse_cam_log(self.session_path.joinpath(video_collection, f'_iblrig_{self.label}Camera.raw.camlog'))
        self.data['count'] = log.frame_id.values

        # Load the audio and raw FPGA times
        sync = chmap = None
        if self.sync != 'bpod' and self.sync is not None:
            sync, chmap = ephys_fpga.get_sync_and_chn_map(self.session_path, self.sync_collection)
            audio_ttls = ephys_fpga.get_sync_fronts(sync, chmap['audio'])
            self.data['audio'] = audio_ttls['times']  # Get rises
            # Load raw FPGA times
            cam_ts = camio.extract_camera_sync(sync, chmap)
            self.data['fpga_times'] = cam_ts[self.label]
        else:
            bpod_data = raw.load_data(self.session_path, task_collection=task_collection)
            _, audio_ttls = raw.load_bpod_fronts(self.session_path, data=bpod_data, task_collection=task_collection)
            self.data['audio'] = audio_ttls['times']

        # Load extracted frame times
        alf_path = self.session_path / 'alf'
        try:
            assert not extract_times
            cam_path = next(alf_path.rglob(f'*{self.label}Camera.times*')).parent
            self.data['timestamps'] = alfio.load_object(
                cam_path, f'{self.label}Camera', short_keys=True)['times']
        except AssertionError:  # Re-extract
            kwargs = dict(video_path=self.video_path, save=False)
            if self.sync == 'bpod':
                extractor = camio.CameraTimestampsBpod(self.session_path, task_collection=task_collection)
            else:
                kwargs.update(sync=sync, chmap=chmap)
                extractor = camio.CameraTimestampsCamlog(self.label, self.session_path)
            outputs, _ = extractor.extract(**kwargs)
            self.data['timestamps'] = outputs[f'{self.label}_camera_timestamps']
        except ALFObjectNotFound:
            _log.warning('no camera.times ALF found for session')

        # Get audio and wheel data
        wheel_keys = ('timestamps', 'position')
        try:
            # glob in case wheel data are in sub-collections
            wheel_path = next(alf_path.rglob('*wheel.timestamps*')).parent
            self.data['wheel'] = alfio.load_object(wheel_path, 'wheel', short_keys=True)
        except (StopIteration, ALFObjectNotFound):  # also catch exhausted glob
            # Extract from raw data
            if self.sync != 'bpod':
                wheel_data = ephys_fpga.extract_wheel_sync(sync, chmap)
            else:
                wheel_data = training_wheel.get_wheel_position(self.session_path, task_collection=task_collection)
            self.data['wheel'] = Bunch(zip(wheel_keys, wheel_data))

        # Find a short period of wheel motion for motion correlation.
        if data_for_keys(wheel_keys, self.data['wheel']) and self.data['timestamps'] is not None:
            self.data['wheel'].period = self.get_active_wheel_period(self.data['wheel'])

        # Load in camera times
        self.data['camera_times'] = log.timestamp.values

        # Gather information from the video file
        if load_video:
            _log.info('Inspecting video file...')
            self.load_video_data()

    def check_camera_times(self):
        """Check that the number of raw camera timestamps matches the number of video frames."""
        if not data_for_keys(('camera_times', 'video'), self.data):
            return spec.QC.NOT_SET
        length_match = len(self.data['camera_times']) == self.data['video'].length
        outcome = spec.QC.PASS if length_match else spec.QC.WARNING
        # 1 / np.median(np.diff(self.data.camera_times))
        return outcome, len(self.data['camera_times']) - self.data['video'].length


def data_for_keys(keys, data):
    """Check that keys exist in the 'data' dict and contain values other than None."""
    return data is not None and all(k in data and data.get(k, None) is not None for k in keys)
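
# Examples of the guard above:
# >>> data_for_keys(('a', 'b'), {'a': 1, 'b': None})  # False: 'b' is None
# >>> data_for_keys(('a',), {'a': 1})                 # True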


def get_task_collection(sess_params):
    """
    Return the first non-passive task collection.

    Returns the first task collection from the experiment description whose task name does not
    contain 'passive', otherwise returns 'raw_behavior_data'.

    Parameters
    ----------
    sess_params : dict
        The loaded experiment description file.

    Returns
    -------
    str:
        The collection presumed to contain wheel data.
    """
    sess_params = sess_params or {}
    tasks = (chain(*map(dict.items, sess_params.get('tasks', []))))
    return next((v['collection'] for k, v in tasks if 'passive' not in k), 'raw_behavior_data')
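
# Example with a hypothetical experiment description:
# >>> params = {'tasks': [{'passiveChoiceWorld': {'collection': 'raw_passive_data'}},
# ...                     {'ephysChoiceWorld': {'collection': 'raw_task_data_00'}}]}
# >>> get_task_collection(params)  # -> 'raw_task_data_00' (first non-passive task)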


def get_video_collection(sess_params, label):
    """
    Return the collection containing the raw video data for a given camera.

    Parameters
    ----------
    sess_params : dict
        The loaded experiment description file.
    label : str
        The camera label.

    Returns
    -------
    str:
        The collection presumed to contain the video data.
    """
    DEFAULT = 'raw_video_data'
    value = sess_params or {}
    for key in ('devices', 'cameras', label, 'collection'):
        value = value.get(key)
        if not value:
            return DEFAULT
    return value
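
# Example with a hypothetical experiment description:
# >>> params = {'devices': {'cameras': {'left': {'collection': 'raw_video_data_00'}}}}
# >>> get_video_collection(params, 'left')   # -> 'raw_video_data_00'
# >>> get_video_collection({}, 'right')      # -> 'raw_video_data' (the default)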


def run_all_qc(session, cameras=('left', 'right', 'body'), **kwargs):
    """Run QC for all cameras.

    Run the camera QC for left, right and body cameras.

    :param session: A session path or eid.
    :param update: If True, QC fields are updated on Alyx.
    :param cameras: A list of camera names to perform QC on.
    :param stream: If true and local video files are not available, the data are streamed from
     the remote source.
    :return: dict of CameraQC objects
    """
    qc = {}
    camlog = kwargs.pop('camlog', False)
    CamQC = CameraQCCamlog if camlog else CameraQC

    run_args = {k: kwargs.pop(k) for k in ('extract_times', 'update')
                if k in kwargs.keys()}
    for camera in cameras:
        qc[camera] = CamQC(session, camera, **kwargs)
        qc[camera].run(**run_args)
    return qc
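
# Minimal end-to-end sketch (eid is assumed to be a valid session UUID on Alyx):
# >>> qcs = run_all_qc(eid, cameras=('left',), update=False, stream=True)
# >>> qcs['left'].metrics  # dict of check outcomes for the left camera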