mmwavecapture.capture package#

Submodules#

mmwavecapture.capture.capture module#

class mmwavecapture.capture.capture.Capture(base_path: Path)#

Bases: object

add_capture_hardware(hw: CaptureHardware) None#
capture() None#
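A minimal usage sketch. `NullSensor` refers to the hypothetical CaptureHardware subclass sketched under CaptureHardware below and is assumed to be importable; whether Capture creates the base directory itself is not stated on this page, so it is created explicitly here.

```python
from pathlib import Path

from mmwavecapture.capture.capture import Capture

# `NullSensor` is the hypothetical CaptureHardware subclass sketched
# under `CaptureHardware` below; assume it is importable here.

base_path = Path("dataset/capture_00000")  # placeholder path
base_path.mkdir(parents=True, exist_ok=True)

capture = Capture(base_path=base_path)

hw = NullSensor(hw_name="null")
capture.add_capture_hardware(hw)  # per the docs, this also sets up hw.base_path

capture.capture()  # run the capture for all registered hardware
```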
class mmwavecapture.capture.capture.CaptureHardware#

Bases: ABC

CaptureHardware is the abstract base class for all capture hardware.

Each capture hardware goes through five stages during the capture process (see the subclass sketch after this list):

  1. Initialize capture hardware

This stage should initialize and configure the capture hardware and make sure it is ready to capture data.

    Note

    Do not create any output files in this stage.

  2. Prepare capture environment and output files

This stage should set up the capture environment and create the output files for the capture hardware. Threads or processes for capturing data should also be created at this stage, but not started yet.

    Warning

    The output files should be created under base_path

If you are using the Capture class, the hardware base_path will be set after calling Capture.add_capture_hardware(). If you are using the CaptureManager class, the hardware base_path will be set during CaptureManager.capture(). If you are not using either of those classes, you must set base_path yourself before calling CaptureHardware.prepare_capture().

  3. Start capture

    This stage should start the capture process/thread.

  4. Stop capture

    This stage should stop the capture process/thread and close the output files.

  5. Dump configuration

    This stage should dump the configuration of the capture hardware to base_path/<config_name> for future reference.
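A minimal sketch of a CaptureHardware subclass that follows the five stages above. The NullSensor class, its constructor, and its file contents are hypothetical, and the sketch assumes the base constructor takes no required arguments; a real implementation would talk to actual hardware and match the actual CaptureHardware constructor.

```python
import json

from mmwavecapture.capture.capture import CaptureHardware


class NullSensor(CaptureHardware):
    """Hypothetical sensor that writes a placeholder output file."""

    CONFIG_FILENAME = "null_sensor.json"
    OUTPUT_FILENAME = "null_sensor.txt"

    def __init__(self, hw_name: str) -> None:
        super().__init__()  # assumes the base constructor takes no required arguments
        self._name = hw_name

    @property
    def hw_name(self) -> str:
        return self._name

    def init_capture_hw(self) -> None:
        # Stage 1: initialize and configure the hardware; no output files yet.
        pass

    def prepare_capture(self) -> None:
        # Stage 2: `base_path` must already be set (by Capture,
        # CaptureManager, or manually) before this is called.
        assert self.base_path is not None
        self._outfile = (self.base_path / self.OUTPUT_FILENAME).open("w")

    def start_capture(self) -> None:
        # Stage 3: start the capture thread/process (trivial here).
        self._outfile.write("captured data\n")

    def stop_capture(self) -> None:
        # Stage 4: stop capturing and close the output files.
        self._outfile.close()

    def dump_config(self) -> None:
        # Stage 5: dump the hardware configuration for future reference.
        with (self.base_path / self.CONFIG_FILENAME).open("w") as f:
            json.dump({"hw_name": self.hw_name}, f)
```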

property base_path: Path | None#

The base path for the capture hardware

The base path is set by the Capture class or the CaptureManager class, or by yourself if you are not using either of them (see the sketch below).

If set by CaptureManager, the base path will be <dataset_path>/<capture_path>/<hw_name>/.

Setter:

Set the base path for the capture hardware

Getter:

Get the base path for the capture hardware
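When neither helper class is used, the path can be assigned directly, mirroring the <dataset_path>/<capture_path>/<hw_name>/ convention. A small sketch using the hypothetical NullSensor from the CaptureHardware example above:

```python
from pathlib import Path

hw = NullSensor(hw_name="null")
hw.base_path = Path("dataset") / "capture_00000" / hw.hw_name
hw.base_path.mkdir(parents=True, exist_ok=True)  # ensure the directory exists
```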

abstract dump_config() None#

Dump the configuration of the capture hardware to base_path

property hw_name: str#
abstract init_capture_hw() None#

Initialize the capture hardware

abstract prepare_capture() None#

Prepare the capture environment and output files

Output filename should be self.base_path/<sensor>.*

abstract start_capture() None#

Start the sensor to capture data

abstract stop_capture() None#

Stop the sensor and close the capture output files

class mmwavecapture.capture.capture.CaptureManager(config_filename: Path)#

Bases: object

CaptureManager manages an HDF5-like dataset directory structure and handles capture hardware initialization and the capture process.

The layout of dataset directory is as follows:

dataset_path/           # Created when initializing `CaptureManager`
├── capture_00000/      # Created when calling `CaptureManager.capture()`
│   ├── config.toml     # Capture configuration
│   ├── iwr1843_vert/   # Capture hardware name
│   │   ├── dca.pcap    # DCA1000EVM capture pcap
│   │   ├── radar.cfg   # Radar configuration
│   │   ├── dca.json    # DCA1000EVM configuration
│   ├── realsense/         # Another capture hardware name
│   │   ├── color.avi      # Color video
├── capture_00001/
│   ├── config.toml
│   ├── iwr1843_vert/
│   │   ├── dca.pcap
...

The calling sequence of CaptureManager is as follows (see the usage sketch after this list):

  1. Initialize CaptureManager with config.toml path

  2. Initialize capture hardware with CaptureManager.init_hw()

  3. Start capture by calling CaptureManager.capture()
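A minimal sketch of that sequence. The config path is a placeholder, and how the dataset path and the capture hardware are declared (in config.toml or in code) is not shown on this page.

```python
from pathlib import Path

from mmwavecapture.capture.capture import CaptureManager

# 1. Initialize CaptureManager with the config.toml path (placeholder path).
manager = CaptureManager(config_filename=Path("dataset/config.toml"))

# 2. Initialize the capture hardware.
manager.init_hw()

# 3. Start a capture; this creates the next capture_NNNNN/ directory
#    and runs one capture for all hardware.
manager.capture()
```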

CAPTURE_DIR_FORMAT = 'capture_{:05d}'#
CAPTURE_DIR_PREFIX = 'capture_'#
CAPTURE_LOG_FILENAME = 'capture.log'#
CAPTURE_MANAGER_CONFIG_OUTPUT_FILENAME = 'config.toml'#
capture() None#
init_hw() None#

mmwavecapture.capture.radardca module#

class mmwavecapture.capture.radardca.RadarDCA(hw_name: str, dca_eth_interface: str, radar_config_filename: Path, radar_config_port: str = '/dev/ttyACM0', radar_data_port: str = '/dev/ttyACM1', dca_ip: str = '192.168.33.180', dca_config_port: int = 4096, host_ip: str = '192.168.33.30', capture_frames: int = 100, init_capture_hw: bool = True, **kwargs: Dict[str, Any])#

Bases: CaptureHardware

DCA_CONFIG_FILENAME = 'dca.json'#
PCAP_OUTPUT_FILENAME = 'dca.pcap'#
RADAR_CONFIG_FILENAME = 'radar.cfg'#
TCPDUMP_BIN_PATH = '/usr/sbin/tcpdump'#
dump_config() None#

Dump the configuration of the capture hardware to base_path

init_capture_hw() None#

Initialize the capture hardware

prepare_capture() None#

Prepare the capture environment and output files

Output filename should be self.base_path/<sensor>.*

start_capture() None#

Start the sensor to capture data

start_tcpdump_capture(outfile: Path) None#
stop_capture() None#

Stop the sensor and close the capture output files

stop_tcpdump_capture() None#
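A construction sketch using the default ports and addresses from the signature above; the interface name, radar config path, and base path are placeholders, and the comments on what each stage writes are inferred from the filename constants and the dataset layout shown earlier.

```python
from pathlib import Path

from mmwavecapture.capture.radardca import RadarDCA

radar = RadarDCA(
    hw_name="iwr1843_vert",
    dca_eth_interface="enp3s0",                       # placeholder interface name
    radar_config_filename=Path("configs/radar.cfg"),  # placeholder config path
    radar_config_port="/dev/ttyACM0",
    radar_data_port="/dev/ttyACM1",
    dca_ip="192.168.33.180",
    host_ip="192.168.33.30",
    capture_frames=100,
    init_capture_hw=True,  # per the flag name, runs stage 1 during construction
)

# The remaining stages are normally driven by Capture/CaptureManager;
# done manually they would look like:
radar.base_path = Path("dataset/capture_00000/iwr1843_vert")  # placeholder path
radar.base_path.mkdir(parents=True, exist_ok=True)
radar.prepare_capture()   # prepares output files (e.g. dca.pcap) under base_path
radar.start_capture()
# ... wait for the configured number of frames ...
radar.stop_capture()
radar.dump_config()       # dumps the radar/DCA configuration under base_path
```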

mmwavecapture.capture.realsense module#

class mmwavecapture.capture.realsense.CameraIntrinsics(intrinsics: intrinsics)#

Bases: object

Camera intrinsics

Ref: https://intelrealsense.github.io/librealsense/python_docs/_generated/pyrealsense2.intrinsics.html

coeffs: list[float]#

Distortion coefficients

fx: float#

Focal length of the image plane, as a multiple of pixel width

fy: float#

Focal length of the image plane, as a multiple of pixel height

model: str#

Distortion model of the image

ppx: float#

Horizontal coordinate of the principal point of the image, as a pixel offset from the left edge

ppy: float#

Vertical coordinate of the principal point of the image, as a pixel offset from the top edge
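The fields above are the standard pinhole-camera parameters. A small sketch of projecting a 3D camera-space point to pixel coordinates with them; project_point is a hypothetical helper, and lens distortion (coeffs/model) is ignored here.

```python
from typing import Tuple

from mmwavecapture.capture.realsense import CameraIntrinsics


def project_point(intr: CameraIntrinsics, x: float, y: float, z: float) -> Tuple[float, float]:
    """Project a 3D point in camera coordinates (z > 0) to pixel coordinates,
    ignoring lens distortion."""
    u = intr.fx * (x / z) + intr.ppx  # horizontal pixel coordinate
    v = intr.fy * (y / z) + intr.ppy  # vertical pixel coordinate
    return u, v
```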

class mmwavecapture.capture.realsense.ColorConfig(intrinsics: CameraIntrinsics, fps: int)#

Bases: object

fps: int#

Frames per second

intrinsics: CameraIntrinsics#

Camera intrinsics

class mmwavecapture.capture.realsense.ColorMetadata(frame_num, timestamp, stamp_frame_num, time_of_arrival, backend_timestamp, frame_timestamp, actual_fps)#

Bases: NamedTuple

actual_fps: int#

Alias for field number 6

backend_timestamp: float#

Alias for field number 4

frame_num: int#

Alias for field number 0

frame_timestamp: float#

Alias for field number 5

stamp_frame_num: int#

Alias for field number 2

time_of_arrival: float#

Alias for field number 3

timestamp: float#

Alias for field number 1

class mmwavecapture.capture.realsense.DepthConfig(intrinsics: CameraIntrinsics, depth_units: float, fps: int, visual_preset: int)#

Bases: object

depth_units: float#

Depth units

fps: int#

Frames per second

intrinsics: CameraIntrinsics#

Camera intrinsics

visual_preset: int#

Depth visual preset

class mmwavecapture.capture.realsense.DepthMetadata(frame_num, timestamp, stamp_frame_num, time_of_arrival, backend_timestamp, frame_timestamp, actual_fps)#

Bases: NamedTuple

actual_fps: int#

Alias for field number 6

backend_timestamp: float#

Alias for field number 4

frame_num: int#

Alias for field number 0

frame_timestamp: float#

Alias for field number 5

stamp_frame_num: int#

Alias for field number 2

time_of_arrival: float#

Alias for field number 3

timestamp: float#

Alias for field number 1

class mmwavecapture.capture.realsense.Realsense(hw_name: str, fps: int = 30, resolution: Tuple[int, int] = (1920, 1080), depth_resolution: Tuple[int, int] = (1280, 720), capture_frames: int = 150, rotate: bool = False, latency_skip_frames: int = 3, depth_visual_preset: int = 3, **kwargs: Dict[str, Any])#

Bases: CaptureHardware

COLOR_CONFIG_FILENAME = 'color_config.json'#
COLOR_METADATA_FILENAME = 'color_metadata.json'#
COLOR_OUTPUT_FILENAME = 'color.avi'#
DEPTH_CONFIG_FILENAME = 'depth_config.json'#
DEPTH_METADATA_FILENAME = 'depth_metadata.json'#
DEPTH_OUTPUT_FILENAME = 'depth.zst'#
dump_config() None#

Dump the configuration of the capture hardware to base_path

init_capture_hw() None#

Initialize the capture hardware

prepare_capture() None#

Prepare the capture environment and output files

Output filename should be self.base_path/<sensor>.*

start_capture() None#

Start the sensor to capture data

stop_capture() None#

Stop the sensor and close the capture output files
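A usage sketch for the Realsense hardware with the default settings from the signature above. The base path is a placeholder, the capture is driven manually rather than through Capture/CaptureManager, and the comments on which files each stage writes are inferred from the filename constants listed above.

```python
from pathlib import Path

from mmwavecapture.capture.realsense import Realsense

cam = Realsense(
    hw_name="realsense",
    fps=30,
    resolution=(1920, 1080),
    depth_resolution=(1280, 720),
    capture_frames=150,
)

cam.init_capture_hw()   # stage 1 (skip if the constructor already initializes the device)

cam.base_path = Path("dataset/capture_00000/realsense")  # placeholder path
cam.base_path.mkdir(parents=True, exist_ok=True)

cam.prepare_capture()   # prepares output files such as color.avi under base_path
cam.start_capture()
# ... wait until the configured number of frames has been captured ...
cam.stop_capture()
cam.dump_config()       # dumps the color/depth configuration JSON files
```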

mmwavecapture.capture.realsense.stamp_framenum(img: ndarray, frame: int) ndarray#
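The drawing routine itself is not shown on this page; the sketch below is a hypothetical equivalent, assumed from the signature, that overlays the frame number on the image with OpenCV.

```python
import cv2
import numpy as np


def stamp_framenum_sketch(img: np.ndarray, frame: int) -> np.ndarray:
    """Hypothetical equivalent of stamp_framenum(): draw the frame number
    near the top-left corner of the image and return it."""
    cv2.putText(
        img,
        str(frame),
        org=(10, 30),                        # anchor point in pixels
        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
        fontScale=1.0,
        color=(255, 255, 255),
        thickness=2,
    )
    return img
```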

Module contents#