Coverage for ibllib/qc/camera.py: 82%
586 statements
coverage.py v7.7.0, created at 2025-03-17 15:25 +0000
1"""Video quality control.
3This module runs a list of quality control metrics on the camera and extracted video data.
5Examples
6--------
7Run right camera QC, downloading all but video file
9>>> qc = CameraQC(eid, 'right', download_data=True, stream=True)
10>>> qc.run()
12Run left camera QC with session path, update QC field in Alyx
14>>> qc = CameraQC(session_path, 'left')
15>>> outcome, extended = qc.run(update=True) # Returns outcome of videoQC only
16>>> print(f'video QC = {outcome}; overall session QC = {qc.outcome}') # NB difference outcomes
18Run only video QC (no timestamp/alignment checks) on 20 frames for the body camera
20>>> qc = CameraQC(eid, 'body', n_samples=20)
21>>> qc.load_video_data() # Quicker than loading all data
22>>> qc.run()
24Run specific video QC check and display the plots
26>>> qc = CameraQC(eid, 'left')
27>>> qc.load_data(download_data=True)
28>>> qc.check_position(display=True) # NB: Not all checks make plots
30Run the QC for all cameras
32>>> qcs = run_all_qc(eid)
33>>> qcs['left'].metrics # Dict of checks and outcomes for left camera
34"""
import logging
from inspect import getmembers, isfunction
from pathlib import Path
from itertools import chain
import copy

import cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle

import one.alf.io as alfio
from one.alf import spec
from one.alf.exceptions import ALFObjectNotFound
from iblutil.util import Bunch
from iblutil.numerical import within_ranges

import ibllib.io.extractors.camera as camio
from ibllib.io.extractors import ephys_fpga, training_wheel, mesoscope
from ibllib.io.extractors.video_motion import MotionAlignment
from ibllib.io import raw_data_loaders as raw
from ibllib.io.raw_daq_loaders import load_timeline_sync_and_chmap
from ibllib.io.session_params import read_params, get_sync, get_sync_namespace
import brainbox.behavior.wheel as wh
from ibllib.io.video import get_video_meta, get_video_frames_preload, assert_valid_label
from . import base

_log = logging.getLogger(__name__)

try:
    from labcams import parse_cam_log
except ImportError:
    _log.warning('labcams not installed')


class CameraQC(base.QC):
    """A class for computing camera QC metrics."""

    dstypes = [
        '_ibl_experiment.description',
        '_iblrig_Camera.frameData',  # Replaces the next 3 datasets
        '_iblrig_Camera.frame_counter',
        '_iblrig_Camera.GPIO',
        '_iblrig_Camera.timestamps',
        '_iblrig_taskData.raw',
        '_iblrig_taskSettings.raw',
        '_iblrig_Camera.raw',
        'camera.times',
        'wheel.position',
        'wheel.timestamps'
    ]
    dstypes_fpga = [
        '_spikeglx_sync.channels',
        '_spikeglx_sync.polarities',
        '_spikeglx_sync.times',
        'ephysData.raw.meta'
    ]
    """Recall that for the training rig there is only one side camera at 30 Hz and
    1280 x 1024 px. For the recording rig there are two side cameras (left: 60 Hz,
    1280 x 1024 px; right: 150 Hz, 640 x 512 px) and one body camera (30 Hz, 640 x 512 px)."""
    video_meta = {
        'training': {
            'left': {
                'fps': 30,
                'width': 1280,
                'height': 1024
            }
        },
        'ephys': {
            'left': {
                'fps': 60,
                'width': 1280,
                'height': 1024
            },
            'right': {
                'fps': 150,
                'width': 640,
                'height': 512
            },
            'body': {
                'fps': 30,
                'width': 640,
                'height': 512
            },
        }
    }

    def __init__(self, session_path_or_eid, camera, **kwargs):
        """
        Compute and view camera QC metrics.

        :param session_path_or_eid: A session id or path.
        :param camera: The camera to run QC on; if None, QC is run for all three cameras.
        :param n_samples: The number of frames to sample for the position and brightness QC.
        :param stream: If true and local video files are not available, the data are streamed
         from the remote source.
        :param log: A logging.Logger instance; if None, the 'ibllib' logger is used.
        :param one: An ONE instance for fetching and setting the QC on Alyx.
        """
        self.stream = kwargs.pop('stream', None)
        self.n_samples = kwargs.pop('n_samples', 100)
        self.sync_collection = kwargs.pop('sync_collection', None)
        self.sync = kwargs.pop('sync_type', None)
        super().__init__(session_path_or_eid, **kwargs)

        # Data
        self.label = assert_valid_label(camera)
        filename = f'_iblrig_{self.label}Camera.raw*.mp4'
        raw_video_path = self.session_path.joinpath('raw_video_data')
        self.video_path = next(raw_video_path.glob(filename), None)

        # If no local video exists, point the video path to the remote URL
        if not self.video_path and self.stream is not False and self.one is not None:
            try:
                self.stream = True
                self.video_path = self.one.path2url(raw_video_path / filename.replace('*', ''))
            except (StopIteration, ALFObjectNotFound):
                _log.error('No remote or local video file found')
                self.video_path = None

        logging.disable(logging.NOTSET)
        keys = ('count', 'pin_state', 'audio', 'fpga_times', 'wheel', 'video',
                'frame_samples', 'timestamps', 'camera_times', 'bonsai_times')
        self.data = Bunch.fromkeys(keys)
        self.frame_samples_idx = None

        # QC outcomes map
        self.metrics = None
        self.outcome = spec.QC.NOT_SET

        # Specify any checks to remove
        self.checks_to_remove = []
        self._type = None

    @property
    def type(self):
        """
        Return the camera type based on the protocol.

        :return: None, 'ephys' or 'training'
        """
        if not self._type:
            return
        else:
            return 'ephys' if 'ephys' in self._type else 'training'

    def load_data(self, extract_times: bool = False, load_video: bool = True) -> None:
        """Extract the data from raw data files.

        Extracts all the required task data from the raw data files. Missing data will raise
        an AssertionError. TODO This method could instead be moved to CameraExtractor classes.

        Data keys:
            - count (int array): the sequential frame number (n, n+1, n+2...)
            - pin_state (): the camera GPIO pin; records the audio TTLs; should be one per frame
            - audio (float array): timestamps of audio TTL fronts
            - fpga_times (float array): timestamps of camera TTLs recorded by FPGA
            - timestamps (float array): extracted video timestamps (the camera.times ALF)
            - bonsai_times (datetime array): system timestamps of video PC; should be one per frame
            - camera_times (float array): camera frame timestamps extracted from frame headers
            - wheel (Bunch): rotary encoder timestamps, position and period used for wheel motion
            - video (Bunch): video metadata, including dimensions and FPS
            - frame_samples (h x w x n array): array of evenly sampled frames (1 colour channel)

        :param extract_times: if True, the camera.times are re-extracted from the raw data
        :param load_video: if True, calls the load_video_data method
        """
        assert self.session_path, 'no session path set'
        _log.info('Gathering data for QC')

        # Get frame count and pin state
        self.data['count'], self.data['pin_state'] = \
            raw.load_embedded_frame_data(self.session_path, self.label, raw=True)

        # If there is an experiment description and there are video parameters
        sess_params = read_params(self.session_path) or {}
        task_collection = get_task_collection(sess_params)
        ns = get_sync_namespace(sess_params)
        self._set_sync(sess_params)
        self._update_meta_from_session_params(sess_params)

        # Load the audio and raw FPGA times
        sync = chmap = None
        if self.sync != 'bpod' and self.sync is not None:
            self.sync_collection = self.sync_collection or 'raw_ephys_data'
            ns = ns or 'spikeglx'
            if ns == 'spikeglx':
                sync, chmap = ephys_fpga.get_sync_and_chn_map(self.session_path, self.sync_collection)
            elif ns == 'timeline':
                sync, chmap = load_timeline_sync_and_chmap(self.session_path / self.sync_collection)
            else:
                raise NotImplementedError(f'Unknown namespace "{ns}"')
            audio_ttls = ephys_fpga.get_sync_fronts(sync, chmap['audio'])
            self.data['audio'] = audio_ttls['times']  # Get rises
            # Load raw FPGA times
            cam_ts = camio.extract_camera_sync(sync, chmap)
            self.data['fpga_times'] = cam_ts[self.label]
        else:
            self.sync_collection = self.sync_collection or task_collection
            bpod_data = raw.load_data(self.session_path, task_collection)
            _, audio_ttls = raw.load_bpod_fronts(
                self.session_path, data=bpod_data, task_collection=task_collection)
            self.data['audio'] = audio_ttls['times']

        # Load extracted frame times
        alf_path = self.session_path / 'alf'
        try:
            assert not extract_times
            self.data['timestamps'] = alfio.load_object(
                alf_path, f'{self.label}Camera', short_keys=True)['times']
        except AssertionError:  # Re-extract
            kwargs = dict(video_path=self.video_path, save=False)
            if self.sync == 'bpod':
                extractor = camio.CameraTimestampsBpod(self.session_path)
                kwargs['task_collection'] = task_collection
            else:
                kwargs.update(sync=sync, chmap=chmap)
                extractor = camio.CameraTimestampsFPGA(self.label, self.session_path)
            self.data['timestamps'] = extractor.extract(**kwargs)[0]
        except ALFObjectNotFound:
            _log.warning('no camera.times ALF found for session')

        # Get audio and wheel data
        wheel_keys = ('timestamps', 'position')
        try:
            # glob in case wheel data are in sub-collections
            alf_path = next(alf_path.rglob('*wheel.timestamps*')).parent
            self.data['wheel'] = alfio.load_object(alf_path, 'wheel', short_keys=True)
        except (StopIteration, ALFObjectNotFound):
            # Extract from raw data
            if self.sync != 'bpod' and self.sync is not None:
                if ns == 'spikeglx':
                    wheel_data = ephys_fpga.extract_wheel_sync(sync, chmap)
                elif ns == 'timeline':
                    extractor = mesoscope.TimelineTrials(self.session_path, sync_collection=self.sync_collection)
                    wheel_data = extractor.extract_wheel_sync()
                else:
                    raise NotImplementedError(f'Unknown namespace "{ns}"')
            else:
                wheel_data = training_wheel.get_wheel_position(
                    self.session_path, task_collection=task_collection)
            self.data['wheel'] = Bunch(zip(wheel_keys, wheel_data))

        # Find a short period of wheel motion for motion correlation
        if data_for_keys(wheel_keys, self.data['wheel']) and self.data['timestamps'] is not None:
            self.data['wheel'].period = self.get_active_wheel_period(self.data['wheel'])

        # Load Bonsai frame timestamps
        try:
            ssv_times = raw.load_camera_ssv_times(self.session_path, self.label)
            self.data['bonsai_times'], self.data['camera_times'] = ssv_times
        except AssertionError:
            _log.warning('No Bonsai video timestamps file found')

        # Gather information from video file
        if load_video:
            _log.info('Inspecting video file...')
            self.load_video_data()

    def load_video_data(self):
        """Get basic properties of video.

        Updates the `data` property with video metadata such as length and frame count, as
        well as loading some frames to perform QC on.
        """
        try:
            self.data['video'] = get_video_meta(self.video_path, one=self.one)
            # Sample some frames from the video file
            indices = np.linspace(100, self.data['video'].length - 100, self.n_samples).astype(int)
            self.frame_samples_idx = indices
            self.data['frame_samples'] = get_video_frames_preload(self.video_path, indices,
                                                                  mask=np.s_[:, :, 0])
        except AssertionError:
            _log.error('Failed to read video file; setting outcome to CRITICAL')
            self._outcome = spec.QC.CRITICAL

    @staticmethod
    def get_active_wheel_period(wheel, duration_range=(3., 20.), display=False):
        """
        Find a period of active wheel movement.

        Attempts to find a period of movement where the wheel accelerates and decelerates,
        for the wheel motion alignment QC.

        :param wheel: A Bunch of wheel timestamps and position data
        :param duration_range: The candidates must be within the min/max duration range
        :param display: If true, plot the selected wheel movement
        :return: 2-element array comprising the start and end times of the active period
        """
        pos, ts = wh.interpolate_position(wheel.timestamps, wheel.position)
        v, acc = wh.velocity_filtered(pos, 1000)
        on, off, *_ = wh.movements(ts, acc, pos_thresh=.1, make_plots=False)
        edges = np.c_[on, off]
        indices, _ = np.where(np.logical_and(
            np.diff(edges) > duration_range[0], np.diff(edges) < duration_range[1]))
        if len(indices) == 0:
            _log.warning('No period of wheel movement found for motion alignment.')
            return None
        # Pick a movement somewhere in the middle
        i = indices[int(indices.size / 2)]
        if display:
            _, (ax0, ax1) = plt.subplots(2, 1, sharex='all')
            mask = np.logical_and(ts > edges[i][0], ts < edges[i][1])
            ax0.plot(ts[mask], pos[mask])
            ax1.plot(ts[mask], acc[mask])
        return edges[i]
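
    # A minimal usage sketch for `get_active_wheel_period` (synthetic wheel data; the
    # values and the returned period are hypothetical). A single smooth ~5 s movement
    # sampled at 100 Hz should be returned as the active period:
    # >>> t = np.arange(0, 60, 0.01)
    # >>> pos = np.cumsum(np.where((t > 20) & (t < 25), np.sin(np.pi * (t - 20) / 5), 0)) / 100
    # >>> wheel = Bunch(timestamps=t, position=pos)
    # >>> CameraQC.get_active_wheel_period(wheel)  # e.g. array([~20., ~25.])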

    def _set_sync(self, session_params=False):
        """Set the sync and sync_collection attributes if not already set.

        Also sets the type attribute based on the sync. NB: the type attribute is for legacy
        sessions.

        Parameters
        ----------
        session_params : dict, bool
            The loaded experiment description file. If False, attempts to load it from the
            session_path.
        """
        if session_params is False:
            if not self.session_path:
                raise ValueError('No session path set')
            session_params = read_params(self.session_path)
        sync, sync_dict = get_sync(session_params)
        self.sync = self.sync or sync
        self.sync_collection = self.sync_collection or sync_dict.get('collection')
        if self.sync:
            self._type = 'ephys' if self.sync == 'nidq' else 'training'

    def _update_meta_from_session_params(self, sess_params):
        """
        Update the default expected video properties.

        Uses properties defined in the experiment description file, if present. This updates
        the `video_meta` property with the fps, width and height for the type and camera label.

        Parameters
        ----------
        sess_params : dict
            The loaded experiment description file.
        """
        try:
            assert sess_params
            video_pars = sess_params.get('devices', {}).get('cameras', {}).get(self.label)
        except (AssertionError, KeyError):
            return
        PROPERTIES = ('width', 'height', 'fps')
        video_meta = copy.deepcopy(self.video_meta)  # must re-assign as it's a class attribute
        if self.type not in video_meta:
            video_meta[self.type] = {}
        if self.label not in video_meta[self.type]:
            video_meta[self.type][self.label] = {}
        video_meta[self.type][self.label].update(
            **{k: v for k, v in video_pars.items() if k in PROPERTIES}
        )
        self.video_meta = video_meta
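
    # A sketch of the experiment description structure read above (the keys follow
    # what this module itself accesses; the values are illustrative):
    # >>> sess_params = {'devices': {'cameras': {'left': {
    # ...     'collection': 'raw_video_data', 'fps': 60, 'width': 1280, 'height': 1024}}}}
    # With this loaded, the 'left' entry of `video_meta` for the session type would be
    # updated with fps=60, width=1280 and height=1024; the 'collection' key is ignored
    # here (it is read by `get_video_collection` instead).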

    def run(self, update: bool = False, **kwargs) -> (str, dict):
        """
        Run video QC checks and return the outcome.

        :param update: if True, updates the session QC fields on Alyx
        :param download_data: if True, downloads any missing data if required
        :param extract_times: if True, re-extracts the camera timestamps from the raw data
        :returns: the overall outcome, and a dict of checks and their outcomes
        """
        _log.info(f'Computing QC outcome for {self.label} camera, session {self.eid}')
        namespace = f'video{self.label.capitalize()}'
        if all(x is None for x in self.data.values()):
            self.load_data(**kwargs)
        if self.data['frame_samples'] is None or self.data['timestamps'] is None:
            return spec.QC.NOT_SET, {}
        if self.data['timestamps'].shape[0] == 0:
            _log.error(f'No timestamps for {self.label} camera; setting outcome to CRITICAL')
            return spec.QC.CRITICAL, {}

        def is_metric(x):
            return isfunction(x) and x.__name__.startswith('check_')

        checks = getmembers(self.__class__, is_metric)
        checks = self._remove_check(checks)
        self.metrics = {f'_{namespace}_' + k[6:]: fn(self) for k, fn in checks}

        values = [x if isinstance(x, spec.QC) else x[0] for x in self.metrics.values()]
        outcome = max(map(spec.QC.validate, values))

        if update:
            extended = dict()
            for k, v in self.metrics.items():
                if v is None:
                    extended[k] = spec.QC.NOT_SET.name
                elif isinstance(v, tuple):
                    extended[k] = tuple(i.name if isinstance(i, spec.QC) else i for i in v)
                else:
                    extended[k] = v.name

            self.update_extended_qc(extended)
            self.update(outcome, namespace)
        return outcome, self.metrics

    def _remove_check(self, checks):
        """
        Remove one or more check functions from the QC checklist.

        Parameters
        ----------
        checks : list of tuple
            The complete list of check function names and handles.

        Returns
        -------
        list of tuple
            The list of check function names and handles, sans the names in the
            `checks_to_remove` property.
        """
        if len(self.checks_to_remove) == 0:
            return checks
        else:
            for check in self.checks_to_remove:
                check_names = [ch[0] for ch in checks]
                idx = check_names.index(check)
                checks.pop(idx)
            return checks

    def check_brightness(self, bounds=(40, 200), max_std=20, roi=True, display=False):
        """Check that the video brightness is within a given range.

        The mean brightness of each frame must be within the bounds provided, and the standard
        deviation across sample frames should be less than the given value. Assumes that the
        frame samples are 2D (no colour channels).

        :param bounds: For each frame, check that bounds[0] < M < bounds[1], where
         M = mean(frame). If fewer than 75% of sample frames are within these bounds, the
         outcome is WARNING. If fewer than 75% are within twice the bounds, the outcome is FAIL.
        :param max_std: The standard deviation of the frame luminance means must be less than
         this value.
        :param roi: If True, check brightness on an ROI of the frame.
        :param display: When True the mean frame luminance is plotted against sample frames.
         The sample frames with the lowest and highest mean luminance are shown.
        """
        if self.data['frame_samples'] is None:
            return spec.QC.NOT_SET
        if roi is True:
            _, h, w = self.data['frame_samples'].shape
            if self.label == 'body':  # Latter half
                roi = (slice(None), slice(None), slice(int(w / 2), None, None))
            elif self.label == 'left':  # Top left quadrant (~2/3 width, 1/2 height)
                roi = (slice(None), slice(None, int(h / 2), None), slice(None, int(w * .66), None))
            else:  # Top right quadrant (~2/3 width, 1/2 height)
                roi = (slice(None), slice(None, int(h / 2), None), slice(int(w * .66), None, None))
        else:
            roi = (slice(None), slice(None), slice(None))
        brightness = self.data['frame_samples'][roi].mean(axis=(1, 2))

        if display:
            f = plt.figure()
            gs = f.add_gridspec(2, 3)
            indices = self.frame_samples_idx
            # Plot mean frame luminance
            ax = f.add_subplot(gs[:2, :2])
            plt.plot(indices, brightness, label='brightness')
            ax.set(
                xlabel='frame #',
                ylabel='brightness (mean pixel)',
                title='Brightness')
            ax.hlines(bounds, 0, indices[-1],
                      colors='tab:orange', linestyles=':', label='warning bounds')
            ax.hlines((bounds[0] / 2, bounds[1] * 2), 0, indices[-1],
                      colors='r', linestyles=':', label='failure bounds')
            ax.legend()
            # Plot min-max frames
            for i, idx in enumerate((np.argmax(brightness), np.argmin(brightness))):
                a = f.add_subplot(gs[i, 2])
                ax.annotate('*', (indices[idx], brightness[idx]),  # the point to label
                            textcoords='offset points', xytext=(0, 1), ha='center')
                frame = self.data['frame_samples'][idx][roi[1:]]
                title = ('min' if i else 'max') + ' mean luminance = %.2f' % brightness[idx]
                self.imshow(frame, ax=a, title=title)

        PCT_PASS = .75  # Proportion of sample frames that must pass
        # Warning if brightness not within range (3/4 of frames must be between bounds)
        warn_range = np.logical_and(brightness > bounds[0], brightness < bounds[1])
        warn_range = 'PASS' if sum(warn_range) / self.n_samples > PCT_PASS else 'WARNING'
        # Fail if brightness not within twice the range or the std exceeds the threshold
        fail_range = np.logical_and(brightness > bounds[0] / 2, brightness < bounds[1] * 2)
        within_range = sum(fail_range) / self.n_samples > PCT_PASS
        fail_range = 'PASS' if within_range and np.std(brightness) < max_std else 'FAIL'
        return self.overall_outcome([warn_range, fail_range])
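
    # Worked sketch of the outcome logic above (synthetic luminance values):
    # >>> brightness = np.r_[np.full(80, 120.), np.full(15, 30.), np.full(5, 230.)]
    # >>> np.mean((brightness > 40) & (brightness < 200))  # 0.8 > 0.75 -> warn check PASS
    # >>> np.mean((brightness > 20) & (brightness < 400))  # 1.0 > 0.75 -> within doubled bounds
    # >>> np.std(brightness)  # ~42 > max_std of 20, so the fail check is 'FAIL'
    # The overall outcome is FAIL, since `overall_outcome` takes the worst of the two.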

    def check_file_headers(self):
        """Check that the frame rate reported in the file header matches the expected rate."""
        if None in (self.data['video'], self.video_meta):
            return spec.QC.NOT_SET
        expected = self.video_meta[self.type][self.label]
        return spec.QC.PASS if self.data['video']['fps'] == expected['fps'] else spec.QC.FAIL

    def check_framerate(self, threshold=1.):
        """Check that the camera times match the specified frame rate for the camera.

        :param threshold: The maximum absolute difference between the timestamp sample rate
         and the video frame rate. NB: Does not take into account dropped frames.
        """
        if any(x is None for x in (self.data['timestamps'], self.video_meta)):
            return spec.QC.NOT_SET
        fps = self.video_meta[self.type][self.label]['fps']
        Fs = 1 / np.median(np.diff(self.data['timestamps']))  # Approx. frequency of camera
        return spec.QC.PASS if abs(Fs - fps) < threshold else spec.QC.FAIL, float(round(Fs, 3))
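
    # Worked sketch of the frame-rate estimate above (synthetic timestamps): a 60 fps
    # camera produces ~16.67 ms frame intervals, so the median-based estimate passes
    # against the expected fps with the default 1 Hz threshold.
    # >>> ts = np.arange(1000) / 60 + np.random.normal(0, 1e-4, 1000)  # jittered 60 Hz
    # >>> Fs = 1 / np.median(np.diff(ts))  # ~60.0
    # >>> abs(Fs - 60) < 1  # True -> PASS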

    def check_pin_state(self, display=False):
        """Check that the GPIO pin state reflects the Bpod audio TTLs."""
        if not data_for_keys(('video', 'pin_state', 'audio'), self.data):
            return spec.QC.NOT_SET
        size_diff = int(self.data['pin_state'].shape[0] - self.data['video']['length'])
        # NB: The pin state can be high for 2 consecutive frames
        low2high = np.insert(np.diff(self.data['pin_state'][:, -1].astype(int)) == 1, 0, False)
        # NB: The time between two consecutive TTLs can be sub-frame, so this will fail
        ndiff_low2high = int(self.data['audio'][::2].size - sum(low2high))
        # Check within ms of audio times
        if display:
            plt.Figure()
            plt.plot(self.data['timestamps'][low2high], np.zeros(sum(low2high)), 'o',
                     label='GPIO Low -> High')
            plt.plot(self.data['audio'], np.zeros(self.data['audio'].size), 'rx',
                     label='Audio TTL High')
            plt.xlabel('FPGA frame times / s')
            plt.gca().set(yticklabels=[])
            plt.gca().tick_params(left=False)
            plt.legend()

        outcome = self.overall_outcome(
            ('PASS' if size_diff == 0 else 'WARNING' if np.abs(size_diff) < 5 else 'FAIL',
             'PASS' if np.abs(ndiff_low2high) < 5 else 'WARNING')
        )
        return outcome, ndiff_low2high, size_diff
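
    # Worked sketch of the pin-state comparison above (synthetic data): a pin state
    # that goes low->high twice should match two audio TTL rises (the audio array
    # holds rise/fall pairs, hence the [::2] indexing of the rises).
    # >>> pin = np.array([0, 0, 1, 1, 0, 1, 0])[:, None]
    # >>> low2high = np.insert(np.diff(pin[:, -1]) == 1, 0, False)  # 2 rising edges
    # >>> audio = np.array([0.10, 0.15, 0.50, 0.55])  # 2 TTLs as rise/fall pairs
    # >>> audio[::2].size - low2high.sum()  # 0 -> counts match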

    def check_dropped_frames(self, threshold=.1):
        """Check how many frames were reported missing.

        :param threshold: The maximum allowable percentage of dropped frames
        """
        if not data_for_keys(('video', 'count'), self.data):
            return spec.QC.NOT_SET
        size_diff = int(self.data['count'].size - self.data['video']['length'])
        strict_increase = np.diff(self.data['count']) > 0
        if not np.all(strict_increase):
            n_affected = np.sum(np.invert(strict_increase))
            _log.info(f'frame count not strictly increasing: '
                      f'{n_affected} frames affected ({n_affected / strict_increase.size:.2%})')
            return spec.QC.CRITICAL
        dropped = np.diff(self.data['count']).astype(int) - 1
        pct_dropped = (sum(dropped) / len(dropped) * 100)
        # Calculate the overall outcome for this check
        outcome = self.overall_outcome(
            ('PASS' if size_diff == 0 else 'WARNING' if np.abs(size_diff) < 5 else 'FAIL',
             'PASS' if pct_dropped < threshold else 'FAIL')
        )
        return outcome, int(sum(dropped)), size_diff
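
    # Worked sketch of the dropped-frame arithmetic above (synthetic counts): the
    # frame counter [0, 1, 2, 5, 6] has np.diff = [1, 1, 3, 1], so dropped = [0, 0, 2, 0];
    # frames 3 and 4 were dropped, i.e. sum(dropped) == 2 over 4 intervals.
    # >>> count = np.array([0, 1, 2, 5, 6])
    # >>> dropped = np.diff(count).astype(int) - 1
    # >>> sum(dropped) / len(dropped) * 100  # 50.0 -> FAIL with the default threshold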

    def check_timestamps(self):
        """Check that the camera.times array is reasonable."""
        if not data_for_keys(('timestamps', 'video'), self.data):
            return spec.QC.NOT_SET
        # Check that the number of timestamps matches the video
        length_matches = self.data['timestamps'].size == self.data['video'].length
        # Check that the times are strictly increasing
        increasing = all(np.diff(self.data['timestamps']) > 0)
        # Check that the times do not contain NaNs
        nanless = not np.isnan(self.data['timestamps']).any()
        return spec.QC.PASS if increasing and length_matches and nanless else spec.QC.FAIL

    def check_camera_times(self):
        """Check that the number of raw camera timestamps matches the number of video frames."""
        if not data_for_keys(('bonsai_times', 'video'), self.data):
            return spec.QC.NOT_SET
        length_match = len(self.data['camera_times']) == self.data['video'].length
        outcome = spec.QC.PASS if length_match else spec.QC.WARNING
        return outcome, len(self.data['camera_times']) - self.data['video'].length

    def check_resolution(self):
        """Check that the video file resolution matches what we expect."""
        if self.data['video'] is None:
            return spec.QC.NOT_SET
        actual = self.data['video']
        expected = self.video_meta[self.type][self.label]
        match = actual['width'] == expected['width'] and actual['height'] == expected['height']
        return spec.QC.PASS if match else spec.QC.FAIL

    def check_wheel_alignment(self, tolerance=(1, 2), display=False):
        """Check that wheel motion in the video correlates with the rotary encoder signal.

        The check is skipped for body camera videos as the wheel is often obstructed.

        Parameters
        ----------
        tolerance : int, (int, int)
            Maximum absolute offset in frames. If two values, the maximum value is taken as
            the warning threshold.
        display : bool
            If true, the wheel motion energy is plotted against the rotary encoder.

        Returns
        -------
        one.alf.spec.QC
            The outcome, one of {'NOT_SET', 'FAIL', 'WARNING', 'PASS'}.
        int
            Frame offset, i.e. by how many frames the video was shifted to match the rotary
            encoder signal. Negative values mean the video was shifted backwards with respect
            to the wheel timestamps.

        Notes
        -----
        - A negative frame offset typically means that there were frame TTLs at the beginning
          that do not correspond to any video frames (sometimes the first few frames aren't
          saved to disk). Since 2021-09-15 the extractor should compensate for this.
        """
        wheel_present = data_for_keys(('position', 'timestamps', 'period'), self.data['wheel'])
        if not wheel_present or self.label == 'body':
            return spec.QC.NOT_SET

        # Check that the selected wheel movement period occurred within the camera timestamps
        camera_times = self.data['timestamps']
        in_range = within_ranges(camera_times, self.data['wheel']['period'].reshape(-1, 2))
        if not in_range.any():
            # Check if any camera timestamps overlap with the wheel times
            if np.any(np.logical_and(
                    camera_times > self.data['wheel']['timestamps'][0],
                    camera_times < self.data['wheel']['timestamps'][-1])
            ):
                _log.warning('Unable to check wheel alignment: '
                             'chosen movement is not during video')
                return spec.QC.NOT_SET
            else:
                # No overlap, return fail
                return spec.QC.FAIL
        aln = MotionAlignment(self.eid, self.one, self.log, session_path=self.session_path)
        aln.data = self.data.copy()
        aln.data['camera_times'] = {self.label: camera_times}
        aln.video_paths = {self.label: self.video_path}
        offset, *_ = aln.align_motion(period=self.data['wheel'].period,
                                      display=display, side=self.label)
        if offset is None:
            return spec.QC.NOT_SET
        if display:
            aln.plot_alignment()

        # Determine the outcome. If there are two values for the tolerance, one is taken to
        # be a warning threshold, the other a failure threshold.
        out_map = {0: spec.QC.WARNING, 1: spec.QC.WARNING, 2: spec.QC.PASS}  # 0: FAIL -> WARNING Aug 2022
        passed = np.abs(offset) <= np.sort(np.array(tolerance))
        return out_map[sum(passed)], int(offset)
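
    # Worked sketch of the tolerance mapping above (hypothetical offsets): with the
    # default tolerance=(1, 2), an offset of 0 or 1 frame passes both thresholds
    # (sum(passed) == 2 -> PASS), an offset of 2 passes only the larger threshold
    # (sum(passed) == 1 -> WARNING), and an offset of 3 passes neither (WARNING since
    # Aug 2022; previously FAIL).
    # >>> offset, tolerance = 2, (1, 2)
    # >>> sum(np.abs(offset) <= np.sort(np.array(tolerance)))  # 1 -> WARNING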

    def check_position(self, hist_thresh=(75, 80), pos_thresh=(10, 15),
                       metric=cv2.TM_CCOEFF_NORMED,
                       display=False, test=False, roi=None, pct_thresh=True):
        """Check that the camera is positioned correctly.

        For the template matching, zero-normalized cross-correlation (the default) should be
        more robust to exposure (which we're not checking here). The L2 norm (TM_SQDIFF)
        should also work.

        If display is True, the template ROI (pink hashed) is plotted over a video frame,
        along with the threshold regions (green solid). The histogram correlations are
        plotted, and the full histogram is plotted for one of the sample frames and the
        reference frame.

        :param hist_thresh: The minimum histogram cross-correlation threshold to pass (0-1).
        :param pos_thresh: The maximum number of pixels that the template matcher may be off
         by. If two values are provided, the lower threshold is treated as a warning boundary.
        :param metric: The metric to use for template matching.
        :param display: If true, the results are plotted.
        :param test: If true, the reference frames are used instead of the frames in
         frame_samples.
        :param roi: A tuple of indices for the face template in the form ((y1, y2), (x1, x2)).
        :param pct_thresh: If true, the thresholds are treated as percentages.
        """
        if not test and self.data['frame_samples'] is None:
            return spec.QC.NOT_SET
        refs = self.load_reference_frames(self.label)
        # Ensure the thresholds are sorted arrays
        pos_thresh = np.sort(np.array(pos_thresh))
        hist_thresh = np.sort(np.array(hist_thresh))

        # Method 1: histogram correlation between each frame and a reference frame
        ref_h = cv2.calcHist([refs[0]], [0], None, [256], [0, 256])
        frames = refs if test else self.data['frame_samples']
        hists = [cv2.calcHist([x], [0], None, [256], [0, 256]) for x in frames]
        corr = np.array([cv2.compareHist(test_h, ref_h, cv2.HISTCMP_CORREL) for test_h in hists])
        if pct_thresh:
            corr *= 100
        hist_passed = [np.all(corr > x) for x in hist_thresh]

        # Method 2: template matching of the face ROI
        top_left, roi, template = self.find_face(roi=roi, test=test, metric=metric, refs=refs)
        (y1, y2), (x1, x2) = roi
        err = (x1, y1) - np.median(np.array(top_left), axis=0)
        h, w = frames[0].shape[:2]

        if pct_thresh:  # Threshold as percent
            err_pct = [(abs(x) / y) * 100 for x, y in zip(err, (h, w))]
            face_passed = [all(err_pct < x) for x in pos_thresh]
        else:
            face_passed = [np.all(np.abs(err) < x) for x in pos_thresh]

        if display:
            plt.figure()
            # Plot the frame with the template overlay
            img = frames[0]
            ax0 = plt.subplot(221)
            ax0.imshow(img, cmap='gray', vmin=0, vmax=255)
            bounds = (x1 - err[0], x2 - err[0], y2 - err[1], y1 - err[1])
            ax0.imshow(template, cmap='gray', alpha=0.5, extent=bounds)
            if pct_thresh:
                for c, thresh in zip(('green', 'yellow'), pos_thresh):
                    t_y = (h / 100) * thresh
                    t_x = (w / 100) * thresh
                    xy = (x1 - t_x, y1 - t_y)
                    ax0.add_patch(Rectangle(xy, x2 - x1 + (t_x * 2), y2 - y1 + (t_y * 2),
                                            fill=True, facecolor=c, lw=0, alpha=0.05))
            else:
                for c, thresh in zip(('green', 'yellow'), pos_thresh):
                    xy = (x1 - thresh, y1 - thresh)
                    ax0.add_patch(Rectangle(xy, x2 - x1 + (thresh * 2), y2 - y1 + (thresh * 2),
                                            fill=True, facecolor=c, lw=0, alpha=0.05))
            xy = (x1 - err[0], y1 - err[1])
            ax0.add_patch(Rectangle(xy, x2 - x1, y2 - y1,
                                    edgecolor='pink', fill=False, hatch='//', lw=1))
            ax0.set(xlim=(0, img.shape[1]), ylim=(img.shape[0], 0))
            ax0.set_axis_off()
            # Plot the image histograms
            ax1 = plt.subplot(212)
            ax1.plot(ref_h[5:-1], label='reference frame')
            ax1.plot(np.array(hists).mean(axis=0)[5:-1], label='mean frame')
            ax1.set_xlim([0, 256])
            plt.legend()
            # Plot the correlations for each sample frame
            ax2 = plt.subplot(222)
            ax2.plot(corr, label='hist correlation')
            ax2.axhline(hist_thresh[0], 0, self.n_samples,
                        linestyle=':', color='r', label='fail threshold')
            ax2.axhline(hist_thresh[1], 0, self.n_samples,
                        linestyle=':', color='g', label='pass threshold')
            ax2.set(xlabel='Sample Frame #', ylabel='Hist correlation')
            plt.legend()
            plt.suptitle('Check position')
            plt.show()

        pass_map = {i: s for i, s in enumerate(('FAIL', 'WARNING', 'PASS'))}
        face_aligned = pass_map[sum(face_passed)]
        hist_correlates = pass_map[sum(hist_passed)]

        return self.overall_outcome([face_aligned, hist_correlates], agg=min)

    def check_focus(self, n=20, threshold=(100, 6),
                    roi=False, display=False, test=False, equalize=True):
        """Check that the video is in focus.

        Two methods are used here: looking at the high frequencies with a DFT, and applying a
        Laplacian HPF and looking at the variance.

        Note:
            - Both methods are sensitive to noise (the Laplacian is a 2nd order filter).
            - The thresholds for the FFT may need to be different for the left/right vs body
              as the distribution of frequencies in the image is different (e.g. the holder
              comprises mostly very high frequencies).
            - The image may be overall in focus but the places we care about can still be out
              of focus (namely the face). For this we take an ROI around the face.
            - The focus check is thrown off by brightness. This may be fixed by equalizing
              the histogram (set equalize=True).

        Parameters
        ----------
        n : int
            Number of frames from the frame_samples data to use in the check.
        threshold : tuple of float
            The lower bounds for the Laplacian variance and mean FFT filtered brightness,
            respectively.
        roi : bool, None, list of slice
            If False, the ROI is determined via template matching for the face or body.
            If None, some set ROIs for the face and paws are used. A list of slices may
            also be passed.
        display : bool
            If true, the results are displayed.
        test : bool
            If true, a set of artificially blurred reference frames are used as the input.
            This can be used to select reasonable thresholds.
        equalize : bool
            If true, the histograms of the frames are equalized, resulting in increased
            global contrast and a linear CDF. This makes the check robust to low light
            conditions.

        Returns
        -------
        one.alf.spec.QC
            The QC outcome, either FAIL or PASS.
        """
        no_frames = self.data['frame_samples'] is None or len(self.data['frame_samples']) == 0
        if not test and no_frames:
            return spec.QC.NOT_SET

        if roi is False:
            top_left, roi, _ = self.find_face(test=test)  # (y1, y2), (x1, x2)
            h, w = map(lambda x: np.diff(x).item(), roi)
            x, y = np.median(np.array(top_left), axis=0).round().astype(int)
            roi = (np.s_[y: y + h, x: x + w],)
        else:
            ROI = {
                'left': (np.s_[:400, :561], np.s_[500:, 100:800]),  # (face, wheel)
                'right': (np.s_[:196, 397:], np.s_[221:, 255:]),
                'body': (np.s_[143:274, 84:433],)  # body holder
            }
            roi = roi or ROI[self.label]

        if test:
            # In test mode, load a reference frame and run it through a normalized box
            # filter with increasing kernel size
            idx = (0,)
            ref = self.load_reference_frames(self.label)[idx]
            kernel_sz = np.unique(np.linspace(0, 15, n, dtype=int))
            n = kernel_sz.size  # Size excluding repeated kernels
            img = np.empty((n, *ref.shape), dtype=np.uint8)
            for i, k in enumerate(kernel_sz):
                img[i] = ref.copy() if k == 0 else cv2.blur(ref, (k, k))
            if equalize:
                [cv2.equalizeHist(x, x) for x in img]
            if display:
                # Plot the blurred images
                f, axes = plt.subplots(1, len(kernel_sz))
                for ax, ig, k in zip(axes, img, kernel_sz):
                    self.imshow(ig, ax=ax, title='Kernel ({0}, {0})'.format(k or 'None'))
                f.suptitle('Reference frame with box filter')
        else:
            # Sub-sample the frame samples
            idx = np.unique(np.linspace(0, len(self.data['frame_samples']) - 1, n, dtype=int))
            img = self.data['frame_samples'][idx]
            if equalize:
                [cv2.equalizeHist(x, x) for x in img]

        # A measure of sharpness, effectively taking the second derivative of the image
        lpc_var = np.empty((min(n, len(img)), len(roi)))
        for i, frame in enumerate(img[::-1]):
            lpc = cv2.Laplacian(frame, cv2.CV_16S, ksize=1)
            lpc_var[i] = [lpc[mask].var() for mask in roi]

        if display:
            # Plot the first sample image
            f = plt.figure()
            gs = f.add_gridspec(len(roi) + 1, 3)
            f.add_subplot(gs[0:len(roi), 0])
            frame = img[0]
            self.imshow(frame, title=f'Frame #{self.frame_samples_idx[idx[0]]}')
            # Plot the ROIs with and without the filter
            lpc = cv2.Laplacian(frame, cv2.CV_16S, ksize=1)
            abs_lpc = cv2.convertScaleAbs(lpc)
            for i, r in enumerate(roi):
                f.add_subplot(gs[i, 1])
                self.imshow(frame[r], title=f'ROI #{i + 1}')
                f.add_subplot(gs[i, 2])
                self.imshow(abs_lpc[r], title=f'ROI #{i + 1} - Laplacian filter')
            f.suptitle('Laplacian blur detection')
            # Plot the variance over frames
            ax = f.add_subplot(gs[len(roi), :])
            ln = plt.plot(lpc_var)
            [l.set_label(f'ROI #{i + 1}') for i, l in enumerate(ln)]
            ax.axhline(threshold[0], 0, n, linestyle=':', color='r', label='lower threshold')
            ax.set(xlabel='Frame sample', ylabel='Variance of the Laplacian')
            plt.tight_layout()
            plt.legend()

        # The second test is to high-pass with a DFT
        h, w = img.shape[1:]
        cX, cY = w // 2, h // 2
        sz = 60  # Seems to be the magic number for high pass
        mask = np.ones((h, w, 2), bool)
        mask[cY - sz:cY + sz, cX - sz:cX + sz] = False
        filt_mean = np.empty(len(img))
        for i, frame in enumerate(img[::-1]):
            dft = cv2.dft(np.float32(frame), flags=cv2.DFT_COMPLEX_OUTPUT)
            f_shift = np.fft.fftshift(dft) * mask  # Shift & remove low frequencies
            f_ishift = np.fft.ifftshift(f_shift)  # Shift back
            filt_frame = cv2.idft(f_ishift)  # Reconstruct
            filt_frame = cv2.magnitude(filt_frame[..., 0], filt_frame[..., 1])
            # Re-normalize to 8 bits to make the threshold simpler
            img_back = cv2.normalize(filt_frame, None, alpha=0, beta=256,
                                     norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
            filt_mean[i] = np.mean(img_back)
            if i == len(img) - 1 and display:
                # Plot the Fourier transforms
                f = plt.figure()
                gs = f.add_gridspec(2, 3)
                self.imshow(img[0], ax=f.add_subplot(gs[0, 0]), title='Original frame')
                dft_shift = np.fft.fftshift(dft)
                magnitude = 20 * np.log(cv2.magnitude(dft_shift[..., 0], dft_shift[..., 1]))
                self.imshow(magnitude, ax=f.add_subplot(gs[0, 1]), title='Magnitude spectrum')
                self.imshow(img_back, ax=f.add_subplot(gs[0, 2]), title='Filtered frame')
                ax = f.add_subplot(gs[1, :])
                ax.plot(filt_mean)
                ax.axhline(threshold[1], 0, n, linestyle=':', color='r', label='lower threshold')
                ax.set(xlabel='Frame sample', ylabel='Mean of filtered frame')
                f.suptitle('Discrete Fourier Transform')
                plt.show()
        passes = np.all(lpc_var > threshold[0]) or np.all(filt_mean > threshold[1])
        return spec.QC.PASS if passes else spec.QC.FAIL
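
    # A minimal sketch of the Laplacian focus measure above (synthetic frames;
    # the threshold comparison is illustrative): blurring a noisy frame sharply
    # reduces the variance of its Laplacian, which is what the check exploits.
    # >>> frame = np.random.randint(0, 255, (512, 640), dtype=np.uint8)
    # >>> sharp_var = cv2.Laplacian(frame, cv2.CV_16S, ksize=1).var()
    # >>> blurred_var = cv2.Laplacian(cv2.blur(frame, (9, 9)), cv2.CV_16S, ksize=1).var()
    # >>> sharp_var > blurred_var  # True; in-focus frames have higher variance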

    def find_face(self, roi=None, test=False, metric=cv2.TM_CCOEFF_NORMED, refs=None):
        """Use template matching to find the face location in the frame.

        For the template matching, zero-normalized cross-correlation (the default) should be
        more robust to exposure (which we're not checking here). The L2 norm (TM_SQDIFF)
        should also work. That said, normalizing the histograms works best.

        :param roi: A tuple of indices for the face template in the form ((y1, y2), (x1, x2))
        :param test: If True, the template is matched against frames that come from the same
         session as the template
        :param metric: The metric to use for template matching
        :param refs: An array of frames to match the template to

        :returns: A list of (x, y) top-left match coordinates (one per frame), the ROI used,
         and the template
        """
        ROI = {
            'left': ((45, 346), (138, 501)),
            'right': ((14, 174), (430, 618)),
            'body': ((141, 272), (90, 339))
        }
        roi = roi or ROI[self.label]
        refs = self.load_reference_frames(self.label) if refs is None else refs

        frames = refs if test else self.data['frame_samples']
        template = refs[0][tuple(slice(*r) for r in roi)]
        top_left = []  # [(x1, y1), ...]
        for frame in frames:
            res = cv2.matchTemplate(frame, template, metric)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            # If the metric is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum
            top_left.append(min_loc if metric < 2 else max_loc)
        return top_left, roi, template
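
    # A minimal sketch of the template matching above (synthetic frame): a patch cut
    # from an image is located exactly by TM_CCOEFF_NORMED at its original position.
    # >>> frame = np.random.randint(0, 255, (100, 100), dtype=np.uint8)
    # >>> template = frame[20:40, 30:60]
    # >>> res = cv2.matchTemplate(frame, template, cv2.TM_CCOEFF_NORMED)
    # >>> cv2.minMaxLoc(res)[3]  # max_loc == (30, 20), i.e. (x, y) of the top-left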

    @staticmethod
    def load_reference_frames(side):
        """Load some reference frames for a given video.

        The reference frames are from sessions where the camera was well positioned. The
        frames are in qc/reference, one file per camera, only one channel per frame. The
        session eids can be found in qc/reference/frame_src.json.

        :param side: Video label, e.g. 'left'
        :return: numpy array of frames with the shape (n, h, w)
        """
        file = next(Path(__file__).parent.joinpath('reference').glob(f'frames_{side}.npy'))
        refs = np.load(file)
        return refs

    @staticmethod
    def imshow(frame, ax=None, title=None, **kwargs):
        """plt.imshow with some convenient defaults for greyscale frames."""
        h = ax or plt.gca()
        defaults = {
            'cmap': kwargs.pop('cmap', 'gray'),
            'vmin': kwargs.pop('vmin', 0),
            'vmax': kwargs.pop('vmax', 255)
        }
        h.imshow(frame, **defaults, **kwargs)
        h.set(title=title)
        h.set_axis_off()
        return ax


class CameraQCCamlog(CameraQC):
    """
    A class for computing camera QC metrics from camlog data.

    For this QC we expect check_pin_state to be NOT_SET, as we are not using the GPIO for
    timestamp alignment.
    """

    dstypes = [
        '_iblrig_taskData.raw',
        '_iblrig_taskSettings.raw',
        '_iblrig_Camera.raw',
        'camera.times',
        'wheel.position',
        'wheel.timestamps'
    ]
    dstypes_fpga = [
        '_spikeglx_sync.channels',
        '_spikeglx_sync.polarities',
        '_spikeglx_sync.times',
        'DAQData.raw.meta',
        'DAQData.wiring'
    ]

    def __init__(self, session_path_or_eid, camera, sync_collection='raw_sync_data', sync_type='nidq', **kwargs):
        """Compute camera QC metrics from camlog data.

        For this QC we expect check_pin_state to be NOT_SET, as we are not using the GPIO
        for timestamp alignment.
        """
        super().__init__(session_path_or_eid, camera, sync_collection=sync_collection, sync_type=sync_type, **kwargs)
        self._type = 'ephys'
        self.checks_to_remove = ['check_pin_state']

    def load_data(self, extract_times: bool = False, load_video: bool = True, **kwargs) -> None:
        """Extract the data from raw data files.

        Extracts all the required task data from the raw data files. Missing data will raise
        an AssertionError.

        Data keys:
            - count (int array): the sequential frame number (n, n+1, n+2...)
            - pin_state (): the camera GPIO pin; records the audio TTLs; should be one per frame
            - audio (float array): timestamps of audio TTL fronts
            - fpga_times (float array): timestamps of camera TTLs recorded by FPGA
            - timestamps (float array): extracted video timestamps (the camera.times ALF)
            - bonsai_times (datetime array): system timestamps of video PC; should be one per frame
            - camera_times (float array): camera frame timestamps extracted from frame headers
            - wheel (Bunch): rotary encoder timestamps, position and period used for wheel motion
            - video (Bunch): video metadata, including dimensions and FPS
            - frame_samples (h x w x n array): array of evenly sampled frames (1 colour channel)

        :param extract_times: if True, the camera.times are re-extracted from the raw data
        :param load_video: if True, calls the load_video_data method
        """
        assert self.session_path, 'no session path set'
        _log.info('Gathering data for QC')

        # If there is an experiment description and there are video parameters
        sess_params = read_params(self.session_path) or {}
        video_collection = get_video_collection(sess_params, self.label)
        task_collection = get_task_collection(sess_params)
        self._set_sync(sess_params)
        self._update_meta_from_session_params(sess_params)

        # Get frame count
        log, _ = parse_cam_log(self.session_path.joinpath(video_collection, f'_iblrig_{self.label}Camera.raw.camlog'))
        self.data['count'] = log.frame_id.values

        # Load the audio and raw FPGA times
        sync = chmap = None
        if self.sync != 'bpod' and self.sync is not None:
            sync, chmap = ephys_fpga.get_sync_and_chn_map(self.session_path, self.sync_collection)
            audio_ttls = ephys_fpga.get_sync_fronts(sync, chmap['audio'])
            self.data['audio'] = audio_ttls['times']  # Get rises
            # Load raw FPGA times
            cam_ts = camio.extract_camera_sync(sync, chmap)
            self.data['fpga_times'] = cam_ts[self.label]
        else:
            bpod_data = raw.load_data(self.session_path, task_collection=task_collection)
            _, audio_ttls = raw.load_bpod_fronts(self.session_path, data=bpod_data, task_collection=task_collection)
            self.data['audio'] = audio_ttls['times']

        # Load extracted frame times
        alf_path = self.session_path / 'alf'
        try:
            assert not extract_times
            cam_path = next(alf_path.rglob(f'*{self.label}Camera.times*')).parent
            self.data['timestamps'] = alfio.load_object(
                cam_path, f'{self.label}Camera', short_keys=True)['times']
        except AssertionError:  # Re-extract
            kwargs = dict(video_path=self.video_path, save=False)
            if self.sync == 'bpod':
                extractor = camio.CameraTimestampsBpod(self.session_path, task_collection=task_collection)
            else:
                kwargs.update(sync=sync, chmap=chmap)
                extractor = camio.CameraTimestampsCamlog(self.label, self.session_path)
            outputs, _ = extractor.extract(**kwargs)
            self.data['timestamps'] = outputs[f'{self.label}_camera_timestamps']
        except ALFObjectNotFound:
            _log.warning('no camera.times ALF found for session')

        # Get audio and wheel data
        wheel_keys = ('timestamps', 'position')
        try:
            # glob in case wheel data are in sub-collections
            wheel_path = next(alf_path.rglob('*wheel.timestamps*')).parent
            self.data['wheel'] = alfio.load_object(wheel_path, 'wheel', short_keys=True)
        except ALFObjectNotFound:
            # Extract from raw data
            if self.sync != 'bpod':
                wheel_data = ephys_fpga.extract_wheel_sync(sync, chmap)
            else:
                wheel_data = training_wheel.get_wheel_position(self.session_path, task_collection=task_collection)
            self.data['wheel'] = Bunch(zip(wheel_keys, wheel_data))

        # Find a short period of wheel motion for motion correlation
        if data_for_keys(wheel_keys, self.data['wheel']) and self.data['timestamps'] is not None:
            self.data['wheel'].period = self.get_active_wheel_period(self.data['wheel'])

        # Load the camera times
        self.data['camera_times'] = log.timestamp.values

        # Gather information from video file
        if load_video:
            _log.info('Inspecting video file...')
            self.load_video_data()

    def check_camera_times(self):
        """Check that the number of raw camera timestamps matches the number of video frames."""
        if not data_for_keys(('camera_times', 'video'), self.data):
            return spec.QC.NOT_SET
        length_match = len(self.data['camera_times']) == self.data['video'].length
        outcome = spec.QC.PASS if length_match else spec.QC.WARNING
        return outcome, len(self.data['camera_times']) - self.data['video'].length


def data_for_keys(keys, data):
    """Check that the keys exist in the 'data' dict and contain values other than None."""
    return data is not None and all(k in data and data.get(k, None) is not None for k in keys)
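
# A couple of doctest-style examples for `data_for_keys` (values are illustrative):
# >>> data_for_keys(('a', 'b'), {'a': 1, 'b': 2})  # True
# >>> data_for_keys(('a', 'b'), {'a': 1, 'b': None})  # False: 'b' is None
# >>> data_for_keys(('a', 'b'), None)  # False: no data at all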


def get_task_collection(sess_params):
    """
    Return the first non-passive task collection.

    Returns the first task collection from the experiment description whose task name does
    not contain 'passive', otherwise returns 'raw_behavior_data'.

    Parameters
    ----------
    sess_params : dict
        The loaded experiment description file.

    Returns
    -------
    str:
        The collection presumed to contain wheel data.
    """
    sess_params = sess_params or {}
    tasks = (chain(*map(dict.items, sess_params.get('tasks', []))))
    return next((v['collection'] for k, v in tasks if 'passive' not in k), 'raw_behavior_data')
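
# A doctest-style sketch of `get_task_collection` (the description dict is illustrative):
# >>> sess_params = {'tasks': [
# ...     {'passiveChoiceWorld': {'collection': 'raw_passive_data'}},
# ...     {'ephysChoiceWorld': {'collection': 'raw_task_data_00'}}]}
# >>> get_task_collection(sess_params)  # 'raw_task_data_00' (first non-passive task)
# >>> get_task_collection(None)  # 'raw_behavior_data' (the default)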


def get_video_collection(sess_params, label):
    """
    Return the collection containing the raw video data for a given camera.

    Parameters
    ----------
    sess_params : dict
        The loaded experiment description file.
    label : str
        The camera label.

    Returns
    -------
    str:
        The collection presumed to contain the video data.
    """
    DEFAULT = 'raw_video_data'
    value = sess_params or {}
    for key in ('devices', 'cameras', label, 'collection'):
        value = value.get(key)
        if not value:
            return DEFAULT
    return value


def run_all_qc(session, cameras=('left', 'right', 'body'), **kwargs):
    """Run QC for all cameras.

    Run the camera QC for the left, right and body cameras.

    :param session: A session path or eid.
    :param update: If True, QC fields are updated on Alyx.
    :param cameras: A list of camera names to perform QC on.
    :param stream: If true and local video files are not available, the data are streamed
     from the remote source.
    :return: dict of CameraQC objects
    """
    qc = {}
    camlog = kwargs.pop('camlog', False)
    CamQC = CameraQCCamlog if camlog else CameraQC

    run_args = {k: kwargs.pop(k) for k in ('extract_times', 'update')
                if k in kwargs.keys()}
    for camera in cameras:
        qc[camera] = CamQC(session, camera, **kwargs)
        qc[camera].run(**run_args)
    return qc