From 61bc384826193d939a199545308c8a6e9f12cd1b Mon Sep 17 00:00:00 2001 From: cristhian aguilera Date: Fri, 30 Jan 2026 16:40:06 -0300 Subject: [PATCH] Working calibration --- AGENTS.md | 52 + config/calibration_poses_ulite6.yml | 31 + config/calibration_ulite6_zed.npz | Bin 0 -> 1926 bytes .../calibration_ulite6_zed_observations.npz | Bin 0 -> 2974 bytes dataflow_calibration_ulite6_zed.yml | 63 + dataflow_ulite6.yml | 2 +- dora_calibration/README.md | 50 + dora_calibration/dora_calibration/__init__.py | 1 + dora_calibration/dora_calibration/main.py | 1054 +++++++++++++++++ dora_calibration/pyproject.toml | 33 + .../tools/analyze_observations.py | 74 ++ .../tools/compare_calibrations.py | 114 ++ dora_calibration/tools/compare_hand_eye.py | 65 + dora_ulite6/README.md | 10 + dora_ulite6/dora_ulite6/main.py | 143 ++- dora_zed_cpp/DoraTargets.cmake | 24 +- dora_zed_cpp/main.cc | 7 + 17 files changed, 1717 insertions(+), 6 deletions(-) create mode 100644 AGENTS.md create mode 100644 config/calibration_poses_ulite6.yml create mode 100644 config/calibration_ulite6_zed.npz create mode 100644 config/calibration_ulite6_zed_observations.npz create mode 100644 dataflow_calibration_ulite6_zed.yml create mode 100644 dora_calibration/README.md create mode 100644 dora_calibration/dora_calibration/__init__.py create mode 100644 dora_calibration/dora_calibration/main.py create mode 100644 dora_calibration/pyproject.toml create mode 100644 dora_calibration/tools/analyze_observations.py create mode 100644 dora_calibration/tools/compare_calibrations.py create mode 100644 dora_calibration/tools/compare_hand_eye.py diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..797474a --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,52 @@ +AGENTS.md + +Purpose +Provide project guidelines for agents working in this repository. + +Scope +This repository uses Dora, Python, C++, and Rust. Prefer clear, easy-to-understand code and incremental implementation with verification. 
+ +General Guidelines +- Keep external dependencies to a minimum. Prefer standard libraries and existing project dependencies. +- Make changes incrementally. Implement small steps, then test or verify before continuing. +- Document assumptions and hardware considerations when relevant. +- Favor readability: simple control flow, meaningful names, and small functions. + +Robot Safety +- This is a real robot project. Avoid changes that could cause unsafe motion or unsafe behavior. +- Add or preserve safeguards (limits, checks, bounds) for sensors and actuators. +- If a change affects motion control or safety, call it out explicitly and propose a validation plan. + +Language-Specific Rules +Python +- Follow PEP 8 style; use type hints where helpful. +- Prefer dataclasses for structured data. +- Avoid unnecessary abstractions. + +C++ +- Follow existing project style; default to modern C++ (C++17 or later) unless constrained. +- Prefer RAII and value semantics. +- Avoid raw ownership; use smart pointers where needed. + +Rust +- Use idiomatic Rust; prefer ownership/borrowing clarity over complex lifetimes. +- Favor explicit error handling with Result and ?. +- Keep modules small and focused. + +Dora +- Keep dataflow definitions simple and explicit. +- Prefer small nodes with clear inputs/outputs. +- Validate node I/O types and message schemas. + +Testing and Verification +- Add or update tests when feasible. +- For robot-facing changes, provide a staged validation plan: + 1) Unit or simulation check (if available). + 2) Dry-run or limited-scope run. + 3) Full system run with safety monitoring. 
+ +Review Checklist +- Dependencies minimized +- Code readable and consistent with language rules +- Incremental changes validated +- Safety considerations addressed diff --git a/config/calibration_poses_ulite6.yml b/config/calibration_poses_ulite6.yml new file mode 100644 index 0000000..1ca7751 --- /dev/null +++ b/config/calibration_poses_ulite6.yml @@ -0,0 +1,31 @@ +--- +calibration_poses: + - position: {x: 0.25, y: 0.03, z: 0.39} + orientation_rpy_deg: {roll: -180.00, pitch: 1.00, yaw: 2.00} + + - position: {x: 0.24, y: 0.06, z: 0.45} + orientation_rpy_deg: {roll: -180.00, pitch: 2.00, yaw: 1.00} + + - position: {x: 0.24, y: 0.05, z: 0.45} + orientation_rpy_deg: {roll: +180.00, pitch: 1.00, yaw: 47.0} + + - position: {x: 0.24, y: -0.08, z: 0.45} + orientation_rpy_deg: {roll: -180.00, pitch: 1.00, yaw: -47.0} + + - position: {x: 0.24, y: 0.07, z: 0.50} + orientation_rpy_deg: {roll: -180.00, pitch: 4.00, yaw: 4.00} + + - position: {x: 0.19, y: -0.20, z: 0.38} + orientation_rpy_deg: {roll: -151.00, pitch: 0.00, yaw: 18.0} + + - position: {x: 0.25, y: 0.13, z: 0.39} + orientation_rpy_deg: {roll: 156.00, pitch: 5.80, yaw: 25.1} + + - position: {x: 0.26, y: 0.14, z: 0.31} + orientation_rpy_deg: {roll: 161.00, pitch: 10.00, yaw: 20.00} + + - position: {x: 0.13, y: -0.03, z: 0.36} + orientation_rpy_deg: {roll: 176.00, pitch: -21.00, yaw: 5.00} + + - position: {x: 0.33, y: 0.01, z: 0.36} + orientation_rpy_deg: {roll: 180.00, pitch: 16.00, yaw: -2.00} diff --git a/config/calibration_ulite6_zed.npz b/config/calibration_ulite6_zed.npz new file mode 100644 index 0000000000000000000000000000000000000000..cb372ee7cd4dba87e7eb7fd6c2f035bc44766e6e GIT binary patch literal 1926 zcmWIWW@gc4fB;2?^_JZ)|3iTwg9t-Nd~#y0QF>8kK|yMfUS2^ZBZB|~10z&Dl%DJt z>KhQr$WX>mt)7xvoLr=CrJ!z;W}&X5pq`drR8o|f7oT60k_r-cOUx-w1&SAEBo?Fs z`5GoV3MQI53bhJk11^U9yEiHHWZ0K*O@4p>|9ksqQOD~%IE(ksxO{nIe)Kwfw|N;o z`TyVVXSlkl&goZ%{hp&CijHRO_RDXk{1$9mYJasX`P6jVcKaPHNl!wb7TIgBo3}Ix 
zXif#su>&{qrr1M$i-Y=LAAk~sRv&jhhXtVsFbI6!RG@n*ffz)`192u0=Rw&SKspi1PXW@XKDN8QI}+w&E})Nd zQ%f@PQ(*p_08|eGFdBzHP3YrI1+EaFTY`bu5r{p37}YPI)_(mC^9v~U@=9~#^OK5G zi^{+;SPb)32T&&nz-a8g$|NoiP#k#*TwD<6p!vqkBsLS~8&GxzX6DqQ#Q5CAlA_EC zm{(3fwZdrZULi6&8|x?-Q|29zPadSbYIA@(3rwA@Z${?Ba4#R}p90HzkH%m4rY literal 0 HcmV?d00001 diff --git a/config/calibration_ulite6_zed_observations.npz b/config/calibration_ulite6_zed_observations.npz new file mode 100644 index 0000000000000000000000000000000000000000..2d8c244b5d788b276ae0beed28e3796a3d77e53f GIT binary patch literal 2974 zcmd58dpMN&dWO)_7E)<-WFwyo!pF=%lSxG-;Yr%ox<5Bty5A+;ZCzjd4;L zlr(br6}AgQV+@1QXzL-2nNN4II5V}*vvkfsXZLx|dB5-XywC6b-rM(kFTdA)qv{k3 z+-LX}ciwaL*JHnfE=~`(#fL&aK%e5mxjWF>ta=}-_^K@ZwbqLCGQvt{3z8!cD7 zi_DFV8U0tF8RvJvz4{{tm)}qdoFv(>!`qx~*^z)mnSMVdKFx;CGeb<3VGI#l$_T{Q zj>u-<)tV2I1T-?7JazP7YGv8Zii~@s7~<#A?UZ_47zKgvB?%BqWfc}b%|>spZG3Fe zkpL?$w146=+s3#{Qt07OrOo>!*@#h}a>?O9YGqtg7foqLe6~$ZPtXNpm0?5&%1hX_ z<2$c!cArL$?LAkv?RyDpf^0-GK^IzWlE-*&e0H)w_T&Cc(t{@HL6h{rfBLV|J6A}? ztr)DQY?>P@E`n?xU*C^kgtV3u3lH#%;Cbl_hE_j@yr@C9U&(@yxGw92f)DlWDK&TX zPOY5kJak0)y?psC`{`6UFzT7U#;%P}5PIU5~U&r z6`xw~m3_VFn7YM~n_fHGesmSZY2C1OQKg&ZzK4gBn@$cr`G2cECbZbZ}Wi3_YOvJuVm z7G0Sidu*MIHs4~xJ5!e7)SO0?J1l>c7M)^GuhVzFS|vyB6KZm3UTEhZYclxX)+7!8 z`LxIJUs03G{zOfF_J$Aj7up(KxtXA`n_XD-t`)e3F;Wk10}LHYRrTOxBETM%NXEuGfj!-m zE%kT={D_R%>FJM=FGk6gdbC1*-ebinK7o^C8k82+J$P@n^SsiZ9Gab`h{cEW;S;0L z()&PMHV~mHY61gmb$^kP563vPiJOT&UjA{jf)h$V9Mt9-Hp658ML#?Xiw1?TQUgqk z*-WphHRCbU{?tcXVCJO%=L!+@;I;{E}VEV638I54WNTP0Sq%j)?k}efExh5hHrhR#_ zLXOe=Zp5kQCDdJ&9UXM+g45yr6CSV6!8s1kv(TvvIo{WOLVNKNBFJJFKb0(~qQp>k zERP1EN$JCCEDKe?VAvWCV=(jG{1;`t-N^lFFK_GCZiw6HlAbe~1e`6cxX>-#D8OW! 
zdatw_g<0}kD(n)$q`P$6$h4nsR6g$cm#vP4y*39=5e97`Gzb+(M{b~ z6Wp8)k^tL)=?Q8u^dvL2gy6sh`{q&uQQJ*u>U1_Pak~jORDHHyTa|&FOm~ND?ZIHx zdZQoK<_cim?P|Z8j6N_tK{>L(S%TaHxLm;;IpPhujY!Lyfj^Sb?L3$Or5rtV2kHZu z))(#JYuW)V;ZFAFm-a(HUsPvM*a7>kXA+NX=!K_R;k7k)+aY-@vx-XXM;FafYK|W1 zfScjXR)g2eVehq6L+>swibFmxzWUIPvIT3kQ} z9d8EpHzh=;4Kkz^=VEbjum!YV>)X5{J%k=%$hFgh*|07*5Fh2;iZ~hP@qr04uwYr} z<2`S|owRjE0}nAciK!;kHk7~|-PIMFMKbh{vr&iK4er9*zR09SOIlFJ=}ft0dJD3x zjS06|n2#=UUhOx0E(3=f({U2KXE~@*}TZJ5(Km zDtBf5;u|P{6^2!_B7+1_cvypzFfSJ6mg^huV#y~}o#J>o|#!rx&y&E!Wg!pP!Tb_iiPULX;e95Dwpo~FrmPapYFOZXVb~@%9~Re zOXZp{5AUA< literal 0 HcmV?d00001 diff --git a/dataflow_calibration_ulite6_zed.yml b/dataflow_calibration_ulite6_zed.yml new file mode 100644 index 0000000..0e221dc --- /dev/null +++ b/dataflow_calibration_ulite6_zed.yml @@ -0,0 +1,63 @@ +nodes: + - id: zed_camera_cpp + build: bash -lc "cmake -S dora_zed_cpp -B dora_zed_cpp/build && cmake --build dora_zed_cpp/build" + path: dora_zed_cpp/build/dora_zed_cpp + env: + ZED_RESOLUTION: "720" + ZED_FPS: "15" + ZED_DEPTH_MODE: "NEURAL" + ZED_DEPTH_MIN_MM: "10" + ZED_DEPTH_MAX_MM: "600" + ZED_DEPTH_FILL: "false" + ZED_FLIP: "ON" + ZED_WARMUP_FRAMES: "30" + inputs: + tick: dora/timer/millis/100 + outputs: + - image_bgr + - camera_info + - id: ulite6 + build: uv pip install -e dora_ulite6 + path: dora_ulite6/dora_ulite6/main.py + inputs: + tick: dora/timer/millis/10 + command: calibration/robot_cmd + outputs: + - tcp_pose + - status + env: + ROBOT_IP: "192.168.1.192" + DEFAULT_SPEED: "30" + DEFAULT_UNITS: "mm" + API_HOST: "0.0.0.0" + API_PORT: "9000" + VACUUM_ENABLED: "false" + - id: calibration + build: | + uv venv -p 3.12 --seed --allow-existing + uv pip install -e dora_calibration + path: dora_calibration/dora_calibration/main.py + env: + VIRTUAL_ENV: ./.venv + IMAGE_INPUT: "image_bgr" + CAMERA_INFO_INPUT: "camera_info" + POSE_INPUT: "tcp_pose" + STATUS_INPUT: "status" + COMMAND_OUTPUT: "robot_cmd" + POSES_FILE: "config/calibration_poses_ulite6.yml" + 
OUTPUT_FILE: "calibration_ulite6_zed.npz" + METHOD: "TSAI" + SQUARES_X: "4" + SQUARES_Y: "6" + SQUARE_LENGTH: "0.04" + MARKER_LENGTH: "0.03" + WAIT_AFTER_MOVE: "2.0" + WAIT_BETWEEN_POSES: "2.0" + inputs: + image_bgr: zed_camera_cpp/image_bgr + camera_info: zed_camera_cpp/camera_info + tcp_pose: ulite6/tcp_pose + status: ulite6/status + tick: dora/timer/millis/100 + outputs: + - robot_cmd diff --git a/dataflow_ulite6.yml b/dataflow_ulite6.yml index 4f1eed5..e8e6ed1 100644 --- a/dataflow_ulite6.yml +++ b/dataflow_ulite6.yml @@ -13,5 +13,5 @@ nodes: DEFAULT_SPEED: "30" DEFAULT_UNITS: "mm" API_HOST: "0.0.0.0" - API_PORT: "8080" + API_PORT: "9000" VACUUM_ENABLED: "true" diff --git a/dora_calibration/README.md b/dora_calibration/README.md new file mode 100644 index 0000000..36f7f5e --- /dev/null +++ b/dora_calibration/README.md @@ -0,0 +1,50 @@ +# Dora Calibration Node + +Minimal eye-in-hand calibration node for a camera mounted on the robot gripper. + +## Inputs + +- `image` (configurable via `IMAGE_INPUT`): image stream with metadata containing `encoding` and `shape` or `width/height`. +- `camera_info` (optional): `[fx, fy, cx, cy]` float array with distortion in metadata. +- `tcp_pose`: `[x, y, z, roll, pitch, yaw]` from `dora_ulite6` in mm/deg. 
+ +## Controls (OpenCV Window) + +- `q` quit +- `r` restart calibration (clear observations and start over) + +## Environment Variables + +| Variable | Default | Description | +| --- | --- | --- | +| `IMAGE_INPUT` | `image` | Image input name | +| `CAMERA_INFO_INPUT` | `camera_info` | Camera info input name | +| `POSE_INPUT` | `tcp_pose` | Robot pose input name | +| `STATUS_INPUT` | `status` | Robot status input name (used for command completion) | +| `COMMAND_OUTPUT` | `robot_cmd` | Output name for robot command messages | +| `POSES_FILE` | `config/calibration_poses.yml` | YAML file path for calibration poses | +| `POSE_UNITS` | `m` | Units for poses file (`m` or `mm`) | +| `MOVE_SPEED` | _(unset)_ | Move speed override (mm/s) | +| `OUTPUT_FILE` | `calibration.npz` | Output calibration file (observations saved as `{name}_observations.npz`) | +| `METHOD` | `TSAI` | Hand-eye method (TSAI, PARK, HORAUD, ANDREFF, DANIILIDIS) | +| `SQUARES_X` | `4` | ChArUco board squares X | +| `SQUARES_Y` | `6` | ChArUco board squares Y | +| `SQUARE_LENGTH` | `0.04` | Square length (m) | +| `MARKER_LENGTH` | `0.03` | Marker length (m) | +| `AUTO_RUN` | `true` | Automatically iterate poses and capture | +| `WAIT_AFTER_MOVE` | `2.0` | Seconds to wait after a move before capture | +| `MOVE_HOME_ON_START` | `true` | Move to calibration home when starting | +| `HOME_X` | `200` | Calibration home X (mm) | +| `HOME_Y` | `0` | Calibration home Y (mm) | +| `HOME_Z` | `300` | Calibration home Z (mm) | +| `HOME_ROLL` | `180` | Calibration home roll (deg) | +| `HOME_PITCH` | `0` | Calibration home pitch (deg) | +| `HOME_YAW` | `0` | Calibration home yaw (deg) | +| `MIN_CORNERS` | `6` | Minimum ChArUco corners required for pose | +| `LOG_INTERVAL` | `2.0` | Seconds between console detection logs | +| `AUTO_SOLVE` | `true` | Automatically solve and save after last pose | + +## Notes + +- Calibration runs automatically: moves through poses, captures, and saves results. 
+- For safe operation, keep a clear workspace around the robot. diff --git a/dora_calibration/dora_calibration/__init__.py b/dora_calibration/dora_calibration/__init__.py new file mode 100644 index 0000000..c39e34d --- /dev/null +++ b/dora_calibration/dora_calibration/__init__.py @@ -0,0 +1 @@ +"""Dora calibration node package.""" diff --git a/dora_calibration/dora_calibration/main.py b/dora_calibration/dora_calibration/main.py new file mode 100644 index 0000000..59a974e --- /dev/null +++ b/dora_calibration/dora_calibration/main.py @@ -0,0 +1,1054 @@ +"""Dora node for eye-in-hand calibration with minimal OpenCV UI.""" + +from __future__ import annotations + +import argparse +from collections import deque +import json +import os +import time +import uuid +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional, Tuple + +import cv2 +import numpy as np +import pyarrow as pa +import yaml +from dora import Node + +WINDOW_NAME = "Dora Eye-in-Hand Calibration" +DEFAULT_DICT = cv2.aruco.DICT_6X6_1000 + + +@dataclass +class CameraIntrinsics: + fx: float + fy: float + cx: float + cy: float + distortion: np.ndarray + + def camera_matrix(self) -> np.ndarray: + return np.array( + [[self.fx, 0.0, self.cx], [0.0, self.fy, self.cy], [0.0, 0.0, 1.0]], + dtype=np.float64, + ) + + +@dataclass +class CalibrationObservation: + base_T_gripper: np.ndarray + cam_T_target: np.ndarray + + +@dataclass +class CalibrationState: + intrinsics: Optional[CameraIntrinsics] = None + latest_tcp_pose: Optional[List[float]] = None + latest_tcp_pose_at: Optional[float] = None + observations: List[CalibrationObservation] = field(default_factory=list) + last_board_pose: Optional[Tuple[np.ndarray, np.ndarray]] = None + pose_list: Optional[List[Dict[str, Any]]] = None + pose_index: int = 0 + last_commanded_pose_index: Optional[int] = None + last_move_pose_index: Optional[int] = None + awaiting_capture_pose_index: Optional[int] = None + last_target_pose: Optional[Dict[str, 
Any]] = None + status_text: str = "" + pending_command: Optional[Dict[str, Any]] = None + last_move_done_at: Optional[float] = None + awaiting_capture: bool = False + last_log_time: float = 0.0 + auto_done: bool = False + last_image_at: Optional[float] = None + last_status_at: Optional[float] = None + last_robot_state: Optional[int] = None + last_robot_idle_at: Optional[float] = None + pose_history: deque[Tuple[float, List[float]]] = field( + default_factory=lambda: deque(maxlen=10) + ) + intrinsics_logged: bool = False + capture_gate: bool = False + last_capture_done_at: Optional[float] = None + waiting_for_stop: bool = False + + +def _rotation_matrix_xyz( + roll_deg: float, pitch_deg: float, yaw_deg: float +) -> np.ndarray: + roll = np.deg2rad(roll_deg) + pitch = np.deg2rad(pitch_deg) + yaw = np.deg2rad(yaw_deg) + cx, sx = np.cos(roll), np.sin(roll) + cy, sy = np.cos(pitch), np.sin(pitch) + cz, sz = np.cos(yaw), np.sin(yaw) + rot_x = np.array([[1.0, 0.0, 0.0], [0.0, cx, -sx], [0.0, sx, cx]]) + rot_y = np.array([[cy, 0.0, sy], [0.0, 1.0, 0.0], [-sy, 0.0, cy]]) + rot_z = np.array([[cz, -sz, 0.0], [sz, cz, 0.0], [0.0, 0.0, 1.0]]) + return rot_z @ rot_y @ rot_x + + +def _pose_to_matrix(tcp_pose: List[float]) -> np.ndarray: + x, y, z, roll, pitch, yaw = tcp_pose + rot = _rotation_matrix_xyz(roll, pitch, yaw) + mat = np.eye(4) + mat[:3, :3] = rot + mat[:3, 3] = np.array([x, y, z], dtype=np.float64) / 1000.0 + return mat + + +def _try_get_charuco_board( + squares_x: int, squares_y: int, square_length: float, marker_length: float +) -> Any: + dictionary = cv2.aruco.getPredefinedDictionary(DEFAULT_DICT) + try: + board = cv2.aruco.CharucoBoard( + (squares_x, squares_y), square_length, marker_length, dictionary + ) + except AttributeError: + board = cv2.aruco.CharucoBoard_create( + squares_x, squares_y, square_length, marker_length, dictionary + ) + if hasattr(board, "setLegacyPattern"): + board.setLegacyPattern(True) + return board + + +def _create_charuco_detector(board: 
Any) -> Optional[Any]: + if not hasattr(cv2.aruco, "CharucoDetector"): + return None + aruco_params = cv2.aruco.DetectorParameters() + charuco_params = cv2.aruco.CharucoParameters() + charuco_params.minMarkers = 2 + return cv2.aruco.CharucoDetector(board, charuco_params, aruco_params) + + +def _decode_image(storage: pa.Array, metadata: Dict[str, Any]) -> np.ndarray: + encoding = str(metadata.get("encoding", "bgr8")).lower() + width = metadata.get("width") + height = metadata.get("height") + if (width is None or height is None) and "shape" in metadata: + shape = metadata.get("shape") + if isinstance(shape, (list, tuple)) and len(shape) >= 2: + height = height if height is not None else int(shape[0]) + width = width if width is not None else int(shape[1]) + if width is None or height is None: + raise KeyError("width/height (or shape) missing from metadata") + + if encoding == "bgr8": + frame = storage.to_numpy().astype(np.uint8).reshape((height, width, 3)) + return frame.copy() + if encoding == "rgb8": + frame = storage.to_numpy().astype(np.uint8).reshape((height, width, 3)) + return cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + if encoding in {"jpeg", "jpg", "jpe", "bmp", "webp", "png"}: + frame = storage.to_numpy().astype(np.uint8) + return cv2.imdecode(frame, cv2.IMREAD_COLOR) + if encoding == "yuv420": + yuv = storage.to_numpy().astype(np.uint8) + yuv = yuv[: width * height * 3 // 2].reshape((height * 3 // 2, width)) + return cv2.cvtColor(yuv, cv2.COLOR_YUV420p2BGR) + + raise RuntimeError(f"Unsupported image encoding: {encoding}") + + +def _intrinsics_from_metadata(metadata: Dict[str, Any]) -> Optional[CameraIntrinsics]: + if not all(k in metadata for k in ("fx", "fy", "cx", "cy")): + return None + distortion = metadata.get("distortion", []) + if distortion is None: + distortion = [] + dist = np.array(distortion, dtype=np.float64).reshape(-1) + if dist.size == 0: + dist = np.zeros(5, dtype=np.float64) + return CameraIntrinsics( + fx=float(metadata["fx"]), + 
fy=float(metadata["fy"]), + cx=float(metadata["cx"]), + cy=float(metadata["cy"]), + distortion=dist, + ) + + +def _intrinsics_from_camera_info( + values: np.ndarray, metadata: Dict[str, Any] +) -> Optional[CameraIntrinsics]: + if values.size < 4: + return None + fx, fy, cx, cy = [float(v) for v in values[:4]] + distortion = metadata.get("distortion", []) + if distortion is None: + distortion = [] + dist = np.array(distortion, dtype=np.float64).reshape(-1) + if dist.size == 0: + dist = np.zeros(5, dtype=np.float64) + return CameraIntrinsics(fx=fx, fy=fy, cx=cx, cy=cy, distortion=dist) + + +def _load_calibration_poses(path: str) -> List[Dict[str, Any]]: + with open(path) as handle: + data = yaml.safe_load(handle) + return data.get("calibration_poses", []) + + +def _send_dora_command( + node: Node, output_name: str, action: str, payload: Dict[str, Any] +) -> str: + command_id = str(uuid.uuid4()) + message = {"id": command_id, "action": action, "payload": payload} + node.send_output( + output_name, + pa.array([json.dumps(message)]), + metadata={"encoding": "json", "timestamp_ns": time.time_ns()}, + ) + return command_id + + +def _parse_status_payload(value: pa.Array) -> Optional[Dict[str, Any]]: + if len(value) == 0: + return None + raw = value[0].as_py() + if not raw: + return None + try: + return json.loads(raw) + except Exception: + return None + + +def _parse_idle_states(raw: str) -> List[int]: + values: List[int] = [] + for part in raw.split(","): + part = part.strip() + if not part: + continue + try: + values.append(int(part)) + except ValueError: + continue + return values + + +def _pose_is_stable( + pose_history: deque[Tuple[float, List[float]]], + window_s: float, + eps_mm: float, + eps_deg: float, +) -> bool: + if not pose_history: + return False + latest_t = pose_history[-1][0] + window = [p for (t, p) in pose_history if latest_t - t <= window_s] + if len(window) < 2: + return False + positions = np.array([p[:3] for p in window], dtype=np.float64) + 
rotations = np.array([p[3:6] for p in window], dtype=np.float64) + pos_span = positions.max(axis=0) - positions.min(axis=0) + rot_span = rotations.max(axis=0) - rotations.min(axis=0) + return bool(np.all(pos_span <= eps_mm) and np.all(rot_span <= eps_deg)) + + +def _select_pose_for_capture( + pose_history: deque[Tuple[float, List[float]]], + image_time: Optional[float], + sync_warn_threshold_ms: float = 100.0, +) -> Optional[List[float]]: + if not pose_history: + return None + if image_time is None: + return pose_history[-1][1] + # Choose the newest pose at or before the image time. + candidates = [(t, p) for (t, p) in pose_history if t <= image_time] + if candidates: + pose_time, pose = candidates[-1] + sync_delta_ms = (image_time - pose_time) * 1000 + if sync_delta_ms > sync_warn_threshold_ms: + print( + f"WARN: pose-image sync delta {sync_delta_ms:.0f}ms > {sync_warn_threshold_ms:.0f}ms threshold", + flush=True, + ) + return pose + return pose_history[-1][1] + + +def _estimate_charuco_pose( + frame: np.ndarray, + board: Any, + intrinsics: CameraIntrinsics, + detector: Optional[Any], + min_corners: int, +) -> Tuple[ + Optional[np.ndarray], + Optional[np.ndarray], + np.ndarray, + bool, + int, + int, + bool, +]: + gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + if detector is not None: + charuco_corners, charuco_ids, marker_corners, marker_ids = detector.detectBoard( + frame + ) + else: + dictionary = ( + board.dictionary if hasattr(board, "dictionary") else board.getDictionary() + ) + marker_corners, marker_ids, _ = cv2.aruco.detectMarkers(gray, dictionary) + charuco_corners, charuco_ids = None, None + if marker_ids is not None and len(marker_ids) > 0: + _, charuco_corners, charuco_ids = cv2.aruco.interpolateCornersCharuco( + marker_corners, marker_ids, gray, board + ) + debug = frame.copy() + marker_count = 0 if marker_ids is None else len(marker_ids) + if marker_count == 0: + return None, None, debug, False, 0, 0, False + + cv2.aruco.drawDetectedMarkers(debug, 
marker_corners, marker_ids) + charuco_count = 0 if charuco_ids is None else len(charuco_ids) + if charuco_count < min_corners: + return None, None, debug, True, marker_count, charuco_count, False + + cv2.aruco.drawDetectedCornersCharuco(debug, charuco_corners, charuco_ids) + obj_points = board.getChessboardCorners()[charuco_ids.flatten()] + ok, rvec, tvec = cv2.solvePnP( + obj_points, + charuco_corners, + intrinsics.camera_matrix(), + intrinsics.distortion, + flags=cv2.SOLVEPNP_IPPE, + ) + if not ok: + return None, None, debug, True, marker_count, charuco_count, False + + rvec, tvec = cv2.solvePnPRefineVVS( + obj_points, + charuco_corners, + intrinsics.camera_matrix(), + intrinsics.distortion, + rvec, + tvec, + ) + + cv2.drawFrameAxes( + debug, + intrinsics.camera_matrix(), + intrinsics.distortion, + rvec, + tvec, + 0.05, + ) + return rvec, tvec, debug, True, marker_count, charuco_count, True + + +def _calibrate_hand_eye( + observations: List[CalibrationObservation], method: int +) -> Tuple[np.ndarray, Dict[str, float]]: + r_gripper2base: List[np.ndarray] = [] + t_gripper2base: List[np.ndarray] = [] + r_target2cam: List[np.ndarray] = [] + t_target2cam: List[np.ndarray] = [] + + for obs in observations: + r_gripper2base.append(obs.base_T_gripper[:3, :3]) + t_gripper2base.append(obs.base_T_gripper[:3, 3].reshape(3, 1)) + r_target2cam.append(obs.cam_T_target[:3, :3]) + t_target2cam.append(obs.cam_T_target[:3, 3].reshape(3, 1)) + + r_cam2gripper, t_cam2gripper = cv2.calibrateHandEye( + r_gripper2base, t_gripper2base, r_target2cam, t_target2cam, method=method + ) + + cam2gripper = np.eye(4) + cam2gripper[:3, :3] = r_cam2gripper + cam2gripper[:3, 3] = t_cam2gripper.reshape(3) + + base_targets = [] + for obs in observations: + base_T_target = obs.base_T_gripper @ cam2gripper @ obs.cam_T_target + base_targets.append(base_T_target[:3, 3]) + base_targets = np.array(base_targets) + mean_target = base_targets.mean(axis=0) + errors = np.linalg.norm(base_targets - 
mean_target[None, :], axis=1) + info = { + "mean_translation_error_m": float(errors.mean()), + "max_translation_error_m": float(errors.max()), + } + return cam2gripper, info + + +def _save_calibration( + filename: str, + cam2gripper: np.ndarray, + method_name: str, + observations: List[CalibrationObservation], + intrinsics: CameraIntrinsics, +) -> None: + save_data = { + "T_cam2gripper": cam2gripper, + "calibration_type": "eye_in_hand", + "method": method_name, + "num_observations": len(observations), + "camera_matrix": intrinsics.camera_matrix(), + "distortion": intrinsics.distortion, + } + np.savez(filename, **save_data) + + obs_file = os.path.join( + os.path.dirname(filename), + f"{os.path.splitext(os.path.basename(filename))[0]}_observations.npz", + ) + r_gripper2base = [obs.base_T_gripper[:3, :3] for obs in observations] + t_gripper2base = [obs.base_T_gripper[:3, 3:4] for obs in observations] + r_target2cam = [obs.cam_T_target[:3, :3] for obs in observations] + t_target2cam = [obs.cam_T_target[:3, 3:4] for obs in observations] + np.savez( + obs_file, + R_gripper2base=r_gripper2base, + t_gripper2base=t_gripper2base, + R_target2cam=r_target2cam, + t_target2cam=t_target2cam, + ) + + +def _method_from_name(name: str) -> int: + name = name.upper() + methods = { + "TSAI": cv2.CALIB_HAND_EYE_TSAI, + "PARK": cv2.CALIB_HAND_EYE_PARK, + "HORAUD": cv2.CALIB_HAND_EYE_HORAUD, + "ANDREFF": cv2.CALIB_HAND_EYE_ANDREFF, + "DANIILIDIS": cv2.CALIB_HAND_EYE_DANIILIDIS, + } + return methods.get(name, cv2.CALIB_HAND_EYE_TSAI) + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Eye-in-hand calibration node with minimal OpenCV UI.", + ) + parser.add_argument("--name", type=str, default="dora-calibration") + args = parser.parse_args() + + image_input = os.getenv("IMAGE_INPUT", "image") + camera_info_input = os.getenv("CAMERA_INFO_INPUT", "camera_info") + pose_input = os.getenv("POSE_INPUT", "tcp_pose") + status_input = os.getenv("STATUS_INPUT", "status") + 
command_output = os.getenv("COMMAND_OUTPUT", "robot_cmd") + poses_file = os.getenv("POSES_FILE", "config/calibration_poses.yml") + pose_units = os.getenv("POSE_UNITS", "m") + move_speed = os.getenv("MOVE_SPEED") + output_file = os.getenv("OUTPUT_FILE", "calibration.npz") + method_name = os.getenv("METHOD", "TSAI") + auto_run = os.getenv("AUTO_RUN", "true").lower() in ("true", "1", "yes") + min_corners = int(os.getenv("MIN_CORNERS", "6")) + log_interval = float(os.getenv("LOG_INTERVAL", "2.0")) + log_after_done = os.getenv("LOG_AFTER_DONE", "false").lower() in ("true", "1", "yes") + log_markers = os.getenv("LOG_MARKERS", "false").lower() in ("true", "1", "yes") + auto_solve = os.getenv("AUTO_SOLVE", "true").lower() in ("true", "1", "yes") + wait_after_move = float(os.getenv("WAIT_AFTER_MOVE", "2.0")) + max_pose_age = float(os.getenv("MAX_POSE_AGE", "0.5")) + pose_stability_window = float(os.getenv("POSE_STABILITY_WINDOW", "0.5")) + pose_stability_eps_mm = float(os.getenv("POSE_STABILITY_EPS_MM", "0.5")) + pose_stability_eps_deg = float(os.getenv("POSE_STABILITY_EPS_DEG", "0.5")) + require_status_idle = os.getenv("REQUIRE_STATUS_IDLE", "false").lower() in ( + "true", + "1", + "yes", + ) + idle_states = _parse_idle_states(os.getenv("IDLE_STATES", "")) + wait_between_poses = float(os.getenv("WAIT_BETWEEN_POSES", "0.0")) + move_home_on_start = os.getenv("MOVE_HOME_ON_START", "true").lower() in ( + "true", + "1", + "yes", + ) + home_x = float(os.getenv("HOME_X", "250")) + home_y = float(os.getenv("HOME_Y", "0")) + home_z = float(os.getenv("HOME_Z", "400")) + home_roll = float(os.getenv("HOME_ROLL", "180")) + home_pitch = float(os.getenv("HOME_PITCH", "0")) + home_yaw = float(os.getenv("HOME_YAW", "0")) + + squares_x = int(os.getenv("SQUARES_X", "4")) + squares_y = int(os.getenv("SQUARES_Y", "6")) + square_length = float(os.getenv("SQUARE_LENGTH", "0.04")) + marker_length = float(os.getenv("MARKER_LENGTH", "0.03")) + + move_speed_val = float(move_speed) if move_speed else 
None + + state = CalibrationState() + if poses_file and not os.path.isabs(poses_file): + config_path = os.path.join("config", poses_file) + poses_file = config_path if os.path.exists(config_path) else poses_file + + if poses_file: + if os.path.exists(poses_file): + state.pose_list = _load_calibration_poses(poses_file) + state.status_text = f"Loaded {len(state.pose_list)} poses" + else: + state.status_text = f"Poses file not found: {poses_file}" + + board = _try_get_charuco_board(squares_x, squares_y, square_length, marker_length) + detector = _create_charuco_detector(board) + method = _method_from_name(method_name) + + node = Node(args.name) + cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL) + started = False + + for event in node: + if event["type"] != "INPUT": + continue + + now = time.monotonic() + if not started: + started = True + if auto_run and move_home_on_start: + payload = { + "x": home_x, + "y": home_y, + "z": home_z, + "roll": home_roll, + "pitch": home_pitch, + "yaw": home_yaw, + } + cmd_id = _send_dora_command( + node, command_output, "move_to_pose", payload + ) + state.pending_command = { + "id": cmd_id, + "action": "move_to_pose", + "sent_at": now, + } + state.status_text = "Sent calibration home" + if state.pending_command: + elapsed = now - state.pending_command["sent_at"] + if elapsed > 30.0: + state.status_text = ( + f"Command timeout ({state.pending_command['action']})" + ) + state.pending_command = None + + if event["id"] == status_input: + payload = _parse_status_payload(event["value"]) + state.last_status_at = now + if payload and state.pending_command: + if payload.get("command_id") == state.pending_command["id"]: + ok = bool(payload.get("ok", False)) + msg = payload.get("message", "") + state.status_text = "Command ok" if ok else f"Command failed: {msg}" + state.pending_command = None + if ok: + state.last_move_done_at = None + if state.last_commanded_pose_index is not None: + state.awaiting_capture = True + state.last_move_pose_index = 
state.last_commanded_pose_index + state.awaiting_capture_pose_index = ( + state.last_commanded_pose_index + ) + state.last_commanded_pose_index = None + state.capture_gate = False + state.waiting_for_stop = True + if payload and "state" in payload: + try: + state.last_robot_state = int(payload["state"]) + except (TypeError, ValueError): + state.last_robot_state = None + if state.last_robot_state is not None and state.last_robot_state in idle_states: + state.last_robot_idle_at = now + continue + + if event["id"] == pose_input: + tcp_pose = event["value"].to_numpy().astype(np.float64).reshape(-1) + if tcp_pose.size >= 6: + state.latest_tcp_pose = tcp_pose[:6].tolist() + state.latest_tcp_pose_at = now + state.pose_history.append((now, state.latest_tcp_pose)) + continue + + if event["id"] == camera_info_input: + raw_bytes = event["value"].to_numpy().astype(np.uint8).tobytes() + dtype_str = str(event["metadata"].get("dtype", "float32")) + dtype_map = {"float32": np.float32, "float64": np.float64} + dtype = dtype_map.get(dtype_str, np.float32) + values = np.frombuffer(raw_bytes, dtype=dtype).astype(np.float64) + intrinsics = _intrinsics_from_camera_info(values, event["metadata"]) + if intrinsics: + state.intrinsics = intrinsics + if not state.intrinsics_logged: + state.intrinsics_logged = True + print( + f"intrinsics fx={intrinsics.fx:.4f} fy={intrinsics.fy:.4f} " + f"cx={intrinsics.cx:.4f} cy={intrinsics.cy:.4f} " + f"dist={intrinsics.distortion.tolist()}", + flush=True, + ) + continue + + if event["id"] != image_input: + # Non-image events drive the move + capture gate state machine. 
+ if ( + auto_run + and state.awaiting_capture + and not state.pending_command + ): + if state.awaiting_capture_pose_index is None: + state.awaiting_capture = False + continue + if ( + state.last_move_pose_index is None + or state.last_move_pose_index != state.awaiting_capture_pose_index + ): + continue + if state.last_move_done_at is None: + if state.waiting_for_stop and _pose_is_stable( + state.pose_history, + pose_stability_window, + pose_stability_eps_mm, + pose_stability_eps_deg, + ): + state.last_move_done_at = now + state.waiting_for_stop = False + print( + f"pose_stable pose={state.awaiting_capture_pose_index} at={now:.3f}", + flush=True, + ) + else: + state.status_text = "Waiting for pose to stabilize" + continue + if state.last_move_done_at is None: + state.awaiting_capture = False + elif now - state.last_move_done_at < wait_after_move: + state.status_text = "Waiting after move" + continue + elif require_status_idle and idle_states and state.last_status_at is not None: + if ( + state.last_robot_state is None + or state.last_robot_state not in idle_states + ): + state.status_text = "Waiting for robot idle state" + elif ( + state.last_robot_idle_at is None + or state.last_robot_idle_at < state.last_move_done_at + ): + state.status_text = "Waiting for idle after move" + else: + state.capture_gate = True + state.status_text = "Capture gate armed" + print("capture_gate armed (idle)", flush=True) + else: + state.capture_gate = True + state.status_text = "Capture gate armed" + print("capture_gate armed", flush=True) + + if auto_run and state.pose_list and not state.awaiting_capture: + if ( + state.last_capture_done_at is not None + and now - state.last_capture_done_at < wait_between_poses + ): + state.status_text = "Waiting between poses" + continue + if state.pending_command: + continue + if state.pose_index < len(state.pose_list): + pose = state.pose_list[state.pose_index] + pos = pose.get("position", {}) + ori = pose.get("orientation_rpy_deg", {}) + x, y, z = 
( + float(pos.get("x", 0.0)), + float(pos.get("y", 0.0)), + float(pos.get("z", 0.0)), + ) + if pose_units == "m": + x *= 1000.0 + y *= 1000.0 + z *= 1000.0 + payload = { + "x": x, + "y": y, + "z": z, + "roll": float(ori.get("roll", 0.0)), + "pitch": float(ori.get("pitch", 0.0)), + "yaw": float(ori.get("yaw", 0.0)), + } + if move_speed_val is not None: + payload["speed"] = move_speed_val + cmd_id = _send_dora_command( + node, command_output, "move_to_pose", payload + ) + state.pending_command = { + "id": cmd_id, + "action": "move_to_pose", + "sent_at": now, + } + state.status_text = f"Auto move {state.pose_index + 1}" + state.last_commanded_pose_index = state.pose_index + 1 + state.last_target_pose = pose + state.pose_index += 1 + elif auto_solve and not state.auto_done: + if len(state.observations) >= 3: + cam2gripper, info = _calibrate_hand_eye( + state.observations, method + ) + _save_calibration( + output_file, + cam2gripper, + method_name, + state.observations, + state.intrinsics, + ) + state.status_text = ( + "Saved calibration | " + f"mean err {info['mean_translation_error_m'] * 1000:.2f}mm " + f"max {info['max_translation_error_m'] * 1000:.2f}mm" + ) + print("Calibration Result (T_cam2gripper):") + print(cam2gripper) + print( + f"Mean translation error: {info['mean_translation_error_m']*1000:.2f} mm" + ) + print( + f"Max translation error: {info['max_translation_error_m']*1000:.2f} mm" + ) + else: + state.status_text = "Need at least 3 observations" + state.auto_done = True + continue + + storage = event["value"] + metadata = event.get("metadata", {}) + if state.intrinsics is None: + intrinsics = _intrinsics_from_metadata(metadata) + if intrinsics: + state.intrinsics = intrinsics + if not state.intrinsics_logged: + state.intrinsics_logged = True + print( + f"intrinsics fx={intrinsics.fx:.4f} fy={intrinsics.fy:.4f} " + f"cx={intrinsics.cx:.4f} cy={intrinsics.cy:.4f} " + f"dist={intrinsics.distortion.tolist()}", + flush=True, + ) + + frame = 
_decode_image(storage, metadata) + state.last_image_at = now + image_frame_id = metadata.get("frame_id") + image_ts_ns = metadata.get("timestamp_ns") + debug = frame.copy() + + if state.intrinsics is None: + cv2.putText( + debug, + "Missing camera intrinsics (fx,fy,cx,cy)", + (10, 30), + cv2.FONT_HERSHEY_SIMPLEX, + 0.7, + (0, 0, 255), + 2, + ) + cv2.imshow(WINDOW_NAME, debug) + if cv2.waitKey(1) & 0xFF == ord("q"): + break + continue + + rvec, tvec, debug, detected, marker_count, charuco_count, pose_ok = ( + _estimate_charuco_pose( + frame, board, state.intrinsics, detector, min_corners + ) + ) + if ( + log_markers + and now - state.last_log_time >= log_interval + and (log_after_done or not state.auto_done) + ): + state.last_log_time = now + print( + f"markers={marker_count} charuco={charuco_count} pose_ok={pose_ok}", + flush=True, + ) + state.last_board_pose = (rvec, tvec) if pose_ok else None + + if marker_count == 0: + status = "NO MARKERS" + color = (0, 0, 255) + elif not pose_ok: + status = "MARKERS ONLY" + color = (0, 165, 255) + else: + status = "DETECTED" + color = (0, 255, 0) + cv2.putText( + debug, + f"ChArUco: {status} | Obs: {len(state.observations)}", + (10, 30), + cv2.FONT_HERSHEY_SIMPLEX, + 0.7, + color, + 2, + ) + cv2.putText( + debug, + f"Markers: {marker_count} | Charuco: {charuco_count}", + (10, 55), + cv2.FONT_HERSHEY_SIMPLEX, + 0.6, + (255, 255, 255), + 1, + ) + cv2.putText( + debug, + f"Auto: {'ON' if auto_run else 'OFF'}", + (10, 60), + cv2.FONT_HERSHEY_SIMPLEX, + 0.6, + (255, 255, 255), + 1, + ) + if state.pose_list: + total_poses = len(state.pose_list) + next_pose = ( + state.pose_index + 1 if state.pose_index < total_poses else None + ) + last_pose = state.last_move_pose_index + pose_text = ( + f"Last pose {last_pose}/{total_poses} | Next pose {next_pose}/{total_poses}" + if next_pose is not None + else f"Last pose {last_pose}/{total_poses} | Next pose --/{total_poses}" + ) + cv2.putText( + debug, + pose_text, + (10, 85), + 
cv2.FONT_HERSHEY_SIMPLEX, + 0.6, + (255, 255, 255), + 1, + ) + cv2.putText( + debug, + f"Obs: {len(state.observations)}", + (10, 110), + cv2.FONT_HERSHEY_SIMPLEX, + 0.6, + (255, 255, 255), + 1, + ) + if state.status_text: + cv2.putText( + debug, + state.status_text, + (10, 135), + cv2.FONT_HERSHEY_SIMPLEX, + 0.6, + (255, 255, 255), + 1, + ) + if state.capture_gate: + cv2.putText( + debug, + "Capture gate armed", + (10, 160), + cv2.FONT_HERSHEY_SIMPLEX, + 0.6, + (255, 255, 0), + 1, + ) + + cv2.putText( + debug, + "Keys: r=restart, q=quit", + (10, debug.shape[0] - 10), + cv2.FONT_HERSHEY_SIMPLEX, + 0.5, + (255, 255, 255), + 1, + ) + + cv2.imshow(WINDOW_NAME, debug) + key = cv2.waitKey(1) & 0xFF + + if key == ord("q"): + break + if key == ord("r"): + state.observations.clear() + state.pose_index = 0 + state.pending_command = None + state.awaiting_capture = False + state.capture_gate = False + state.last_capture_done_at = None + state.waiting_for_stop = False + state.auto_done = False + state.status_text = "Calibration restarted" + + if auto_run and state.pose_list: + if state.pending_command: + continue + if state.awaiting_capture: + if not state.capture_gate: + continue + if state.awaiting_capture_pose_index is None: + state.awaiting_capture = False + continue + if state.last_move_done_at is None: + state.awaiting_capture = False + elif now - state.last_move_done_at < wait_after_move: + continue + elif require_status_idle and idle_states and state.last_status_at is not None: + if ( + state.last_robot_state is None + or state.last_robot_state not in idle_states + ): + state.status_text = "Waiting for robot idle state" + continue + if ( + state.last_robot_idle_at is None + or state.last_robot_idle_at < state.last_move_done_at + ): + state.status_text = "Waiting for idle after move" + continue + if ( + state.latest_tcp_pose_at is None + or now - state.latest_tcp_pose_at > max_pose_age + ): + state.status_text = "Waiting for fresh tcp_pose" + continue + if not 
_pose_is_stable( + state.pose_history, + pose_stability_window, + pose_stability_eps_mm, + pose_stability_eps_deg, + ): + state.status_text = "Waiting for stable pose" + continue + elif pose_ok and state.last_board_pose and state.latest_tcp_pose: + rvec, tvec = state.last_board_pose + rot, _ = cv2.Rodrigues(rvec) + cam_T_target = np.eye(4) + cam_T_target[:3, :3] = rot + cam_T_target[:3, 3] = tvec.reshape(3) + pose_for_capture = _select_pose_for_capture( + state.pose_history, state.last_image_at + ) + if pose_for_capture is None: + state.status_text = "No pose for capture" + continue + base_T_gripper = _pose_to_matrix(pose_for_capture) + state.observations.append( + CalibrationObservation( + base_T_gripper=base_T_gripper, + cam_T_target=cam_T_target, + ) + ) + state.status_text = f"Auto captured {len(state.observations)}" + print( + f"capture_done pose={state.last_move_pose_index} at={now:.3f}", + flush=True, + ) + tvec_norm = float(np.linalg.norm(tvec)) + print( + f"auto_capture pose={state.awaiting_capture_pose_index} " + f"obs={len(state.observations)} markers={marker_count} " + f"charuco={charuco_count} pose_ok={pose_ok} " + f"board_tvec_norm_m={tvec_norm:.4f}", + flush=True, + ) + if image_ts_ns is not None: + age_ms = (time.monotonic_ns() - int(image_ts_ns)) / 1e6 + print( + f"image_frame_id={image_frame_id} image_age_ms={age_ms:.1f}", + flush=True, + ) + if state.last_target_pose: + print( + f"desired_pose={state.last_target_pose}", + flush=True, + ) + print( + f"robot_pose={pose_for_capture}", + flush=True, + ) + state.awaiting_capture = False + state.awaiting_capture_pose_index = None + state.capture_gate = False + state.last_capture_done_at = now + continue + + if ( + state.last_capture_done_at is not None + and now - state.last_capture_done_at < wait_between_poses + ): + continue + + if state.pose_index < len(state.pose_list): + pose = state.pose_list[state.pose_index] + pos = pose.get("position", {}) + ori = pose.get("orientation_rpy_deg", {}) + x, y, z 
= ( + float(pos.get("x", 0.0)), + float(pos.get("y", 0.0)), + float(pos.get("z", 0.0)), + ) + if pose_units == "m": + x *= 1000.0 + y *= 1000.0 + z *= 1000.0 + payload = { + "x": x, + "y": y, + "z": z, + "roll": float(ori.get("roll", 0.0)), + "pitch": float(ori.get("pitch", 0.0)), + "yaw": float(ori.get("yaw", 0.0)), + } + if move_speed_val is not None: + payload["speed"] = move_speed_val + cmd_id = _send_dora_command( + node, command_output, "move_to_pose", payload + ) + state.pending_command = { + "id": cmd_id, + "action": "move_to_pose", + "sent_at": now, + } + state.status_text = f"Auto move {state.pose_index + 1}" + state.last_commanded_pose_index = state.pose_index + 1 + state.last_target_pose = pose + state.pose_index += 1 + elif auto_solve and not state.auto_done: + if len(state.observations) >= 3: + cam2gripper, info = _calibrate_hand_eye( + state.observations, method + ) + _save_calibration( + output_file, + cam2gripper, + method_name, + state.observations, + state.intrinsics, + ) + state.status_text = ( + "Saved calibration | " + f"mean err {info['mean_translation_error_m'] * 1000:.2f}mm " + f"max {info['max_translation_error_m'] * 1000:.2f}mm" + ) + print("Calibration Result (T_cam2gripper):") + print(cam2gripper) + print( + f"Mean translation error: {info['mean_translation_error_m']*1000:.2f} mm" + ) + print( + f"Max translation error: {info['max_translation_error_m']*1000:.2f} mm" + ) + else: + state.status_text = "Need at least 3 observations" + state.auto_done = True + + cv2.destroyAllWindows() + + +if __name__ == "__main__": + main() diff --git a/dora_calibration/pyproject.toml b/dora_calibration/pyproject.toml new file mode 100644 index 0000000..72fec57 --- /dev/null +++ b/dora_calibration/pyproject.toml @@ -0,0 +1,33 @@ +[project] +name = "dora-calibration" +version = "0.1.0" +license = { file = "MIT" } +authors = [{ name = "Dora" }] +description = "Dora node for eye-in-hand calibration with minimal OpenCV UI" + +requires-python = ">=3.8" + 
+dependencies = [
+    "dora-rs >= 0.3.9",
+    "numpy < 2.0.0",
+    "opencv-contrib-python >= 4.7.0",
+    "pyyaml >= 6.0",
+]
+
+[dependency-groups]
+dev = ["pytest >=8.1.1", "ruff >=0.9.1"]
+
+[project.scripts]
+dora-calibration = "dora_calibration.main:main"
+
+[tool.ruff.lint]
+extend-select = [
+    "D",
+    "UP",
+    "PERF",
+    "RET",
+    "RSE",
+    "NPY",
+    "N",
+    "I",
+]
diff --git a/dora_calibration/tools/analyze_observations.py b/dora_calibration/tools/analyze_observations.py
new file mode 100644
index 0000000..df662e9
--- /dev/null
+++ b/dora_calibration/tools/analyze_observations.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+"""Analyze saved observations for issues."""
+import argparse
+import numpy as np
+from scipy.spatial.transform import Rotation
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Analyze hand-eye observations for issues.")
+    parser.add_argument("observations", nargs="?", default="calibration_observations.npz",
+                        help="Path to observations .npz file")
+    args = parser.parse_args()
+
+    # Load dora observations
+    dora_obs = np.load(args.observations, allow_pickle=True)
+
+    print("=== OBSERVATIONS ANALYSIS ===")
+    print(f"File: {args.observations}")
+    print(f"Keys: {list(dora_obs.keys())}")
+
+    R_gripper2base = dora_obs['R_gripper2base']
+    t_gripper2base = dora_obs['t_gripper2base']
+    R_target2cam = dora_obs['R_target2cam']
+    t_target2cam = dora_obs['t_target2cam']
+
+    n = len(R_gripper2base)
+    print(f"Count: {n}")
+
+    print("\n=== GRIPPER POSES (base_T_gripper) ===")
+    for i in range(n):
+        R = R_gripper2base[i]
+        t = t_gripper2base[i].flatten()
+
+        # Check rotation matrix validity
+        det = np.linalg.det(R)
+        orth = np.allclose(R @ R.T, np.eye(3), atol=1e-6)
+        rpy = Rotation.from_matrix(R).as_euler('xyz', degrees=True)
+
+        print(f"\nObs {i+1}:")
+        print(f"  det(R)={det:.6f}, orthogonal={orth}")
+        print(f"  t(mm)=[{t[0]*1000:.1f}, {t[1]*1000:.1f}, {t[2]*1000:.1f}]")
+        print(f"  RPY(deg)=[{rpy[0]:.1f}, {rpy[1]:.1f}, {rpy[2]:.1f}]")
+
+        if not orth:
+            
print(" WARNING: Rotation matrix not orthogonal!") + if abs(det - 1.0) > 1e-6: + print(f" WARNING: Rotation matrix determinant is {det}, expected 1.0!") + + print("\n=== TARGET POSES (cam_T_target) ===") + for i in range(n): + R = R_target2cam[i] + t = t_target2cam[i].flatten() + + det = np.linalg.det(R) + orth = np.allclose(R @ R.T, np.eye(3), atol=1e-6) + + print(f"\nObs {i+1}:") + print(f" det(R)={det:.6f}, orthogonal={orth}") + print(f" t(m)=[{t[0]:.4f}, {t[1]:.4f}, {t[2]:.4f}]") + print(f" distance from camera: {np.linalg.norm(t)*1000:.1f}mm") + + # Check for pose diversity (important for calibration) + print("\n=== POSE DIVERSITY CHECK ===") + positions = np.array([t_gripper2base[i].flatten() for i in range(n)]) + pos_range = positions.max(axis=0) - positions.min(axis=0) + print(f"Position range (mm): X={pos_range[0]*1000:.1f}, Y={pos_range[1]*1000:.1f}, Z={pos_range[2]*1000:.1f}") + + rotations = np.array([Rotation.from_matrix(R_gripper2base[i]).as_euler('xyz', degrees=True) for i in range(n)]) + rot_range = rotations.max(axis=0) - rotations.min(axis=0) + print(f"Rotation range (deg): Roll={rot_range[0]:.1f}, Pitch={rot_range[1]:.1f}, Yaw={rot_range[2]:.1f}") + + +if __name__ == "__main__": + main() diff --git a/dora_calibration/tools/compare_calibrations.py b/dora_calibration/tools/compare_calibrations.py new file mode 100644 index 0000000..d7a852d --- /dev/null +++ b/dora_calibration/tools/compare_calibrations.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python3 +"""Compare dora calibration with reference calibration.""" +import argparse +import numpy as np +from scipy.spatial.transform import Rotation + + +def load_calibration(path): + """Load calibration from .npz file.""" + data = np.load(path, allow_pickle=True) + result = {"T_cam2gripper": data["T_cam2gripper"]} + if "camera_matrix" in data: + result["camera_matrix"] = data["camera_matrix"] + if "distortion" in data: + result["distortion"] = data["distortion"] + return result + + +def analyze_transform(T, 
name): + """Print analysis of a 4x4 transformation matrix.""" + print(f"\n=== {name} ===") + print(f"Translation (mm): [{T[0,3]*1000:.2f}, {T[1,3]*1000:.2f}, {T[2,3]*1000:.2f}]") + R = T[:3, :3] + rpy = Rotation.from_matrix(R).as_euler('xyz', degrees=True) + print(f"RPY (deg): [{rpy[0]:.2f}, {rpy[1]:.2f}, {rpy[2]:.2f}]") + print(f"Full matrix:\n{T}") + + +def analyze_camera(cam_matrix, name): + """Print camera intrinsics analysis.""" + print(f"\n=== {name} Camera Intrinsics ===") + fx, fy = cam_matrix[0, 0], cam_matrix[1, 1] + cx, cy = cam_matrix[0, 2], cam_matrix[1, 2] + print(f"fx={fx:.2f}, fy={fy:.2f}, cx={cx:.2f}, cy={cy:.2f}") + + # Sanity checks + issues = [] + if fx < 100 or fy < 100: + issues.append(f"Focal lengths too small (fx={fx:.1f}, fy={fy:.1f})") + if cx < 100 or cy < 100: + issues.append(f"Principal point too small (cx={cx:.1f}, cy={cy:.1f})") + if abs(fx - fy) > 50: + issues.append(f"Large focal length asymmetry: |fx-fy|={abs(fx-fy):.1f}") + + if issues: + print("WARNINGS:") + for issue in issues: + print(f" - {issue}") + else: + print("Intrinsics appear valid") + + +def main(): + parser = argparse.ArgumentParser(description="Compare dora vs reference calibration.") + parser.add_argument("--dora", default="calibration.npz", + help="Path to dora calibration result") + parser.add_argument("--reference", default="reference_calibration.npz", + help="Path to reference calibration result") + parser.add_argument("--check-intrinsics", action="store_true", + help="Also compare camera intrinsics") + args = parser.parse_args() + + try: + dora_data = load_calibration(args.dora) + dora = dora_data["T_cam2gripper"] + except FileNotFoundError: + print(f"Dora calibration not found: {args.dora}") + return + + try: + ref_data = load_calibration(args.reference) + ref = ref_data["T_cam2gripper"] + except FileNotFoundError: + print(f"Reference calibration not found: {args.reference}") + print("Analyzing dora calibration only:") + analyze_transform(dora, "Dora 
Calibration") + if args.check_intrinsics and "camera_matrix" in dora_data: + analyze_camera(dora_data["camera_matrix"], "Dora") + return + + analyze_transform(dora, "Dora Calibration") + analyze_transform(ref, "Reference Calibration") + + # Check camera intrinsics if requested + if args.check_intrinsics: + if "camera_matrix" in dora_data: + analyze_camera(dora_data["camera_matrix"], "Dora") + if "camera_matrix" in ref_data: + analyze_camera(ref_data["camera_matrix"], "Reference") + + # Compute differences + print("\n=== DIFFERENCES ===") + trans_diff = np.linalg.norm(dora[:3,3] - ref[:3,3]) * 1000 + print(f"Translation difference: {trans_diff:.2f} mm") + + R_dora = dora[:3,:3] + R_ref = ref[:3,:3] + R_diff = R_dora @ R_ref.T + angle_diff = np.rad2deg(np.arccos(np.clip((np.trace(R_diff) - 1) / 2, -1, 1))) + print(f"Rotation difference: {angle_diff:.2f} deg") + + # Thresholds for acceptable calibration + if trans_diff < 5.0 and angle_diff < 2.0: + print("\n[OK] Calibrations are within acceptable tolerance") + else: + print("\n[FAIL] Calibrations differ significantly!") + if trans_diff >= 5.0: + print(f" Translation diff {trans_diff:.2f}mm exceeds 5mm threshold") + if angle_diff >= 2.0: + print(f" Rotation diff {angle_diff:.2f} deg exceeds 2 deg threshold") + + +if __name__ == "__main__": + main() diff --git a/dora_calibration/tools/compare_hand_eye.py b/dora_calibration/tools/compare_hand_eye.py new file mode 100644 index 0000000..a27d220 --- /dev/null +++ b/dora_calibration/tools/compare_hand_eye.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python3 +"""Recompute hand-eye calibration from an observations .npz file.""" + +import argparse +from typing import List + +import cv2 +import numpy as np + + +def _method_from_name(name: str) -> int: + name = name.upper() + methods = { + "TSAI": cv2.CALIB_HAND_EYE_TSAI, + "PARK": cv2.CALIB_HAND_EYE_PARK, + "HORAUD": cv2.CALIB_HAND_EYE_HORAUD, + "ANDREFF": cv2.CALIB_HAND_EYE_ANDREFF, + "DANIILIDIS": cv2.CALIB_HAND_EYE_DANIILIDIS, + } + 
return methods.get(name, cv2.CALIB_HAND_EYE_TSAI) + + +def main() -> None: + parser = argparse.ArgumentParser(description="Recompute hand-eye from observations npz.") + parser.add_argument("observations", help="*_observations.npz path") + parser.add_argument("--method", default="TSAI") + args = parser.parse_args() + + data = np.load(args.observations, allow_pickle=True) + R_gripper2base: List[np.ndarray] = list(data["R_gripper2base"]) + t_gripper2base: List[np.ndarray] = list(data["t_gripper2base"]) + R_target2cam: List[np.ndarray] = list(data["R_target2cam"]) + t_target2cam: List[np.ndarray] = list(data["t_target2cam"]) + + method = _method_from_name(args.method) + R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye( + R_gripper2base, t_gripper2base, R_target2cam, t_target2cam, method=method + ) + + T_cam2gripper = np.eye(4) + T_cam2gripper[:3, :3] = R_cam2gripper + T_cam2gripper[:3, 3] = t_cam2gripper.reshape(3) + + base_targets = [] + for i in range(len(R_gripper2base)): + base_T_gripper = np.eye(4) + base_T_gripper[:3, :3] = R_gripper2base[i] + base_T_gripper[:3, 3] = np.array(t_gripper2base[i]).reshape(3) + cam_T_target = np.eye(4) + cam_T_target[:3, :3] = R_target2cam[i] + cam_T_target[:3, 3] = np.array(t_target2cam[i]).reshape(3) + base_T_target = base_T_gripper @ T_cam2gripper @ cam_T_target + base_targets.append(base_T_target[:3, 3]) + base_targets = np.array(base_targets) + mean_target = base_targets.mean(axis=0) + errors = np.linalg.norm(base_targets - mean_target[None, :], axis=1) + + print("T_cam2gripper:") + print(T_cam2gripper) + print(f"Mean translation error: {errors.mean()*1000:.2f} mm") + print(f"Max translation error: {errors.max()*1000:.2f} mm") + + +if __name__ == "__main__": + main() diff --git a/dora_ulite6/README.md b/dora_ulite6/README.md index 1d58115..55efafe 100644 --- a/dora_ulite6/README.md +++ b/dora_ulite6/README.md @@ -11,6 +11,7 @@ Dora node for controlling a UFactory Lite6 robot via REST API, web UI, and publi inputs: tick: 
dora/timer/millis/10 outputs: [status, joint_pose, tcp_pose] + inputs: [command] env: ROBOT_IP: "192.168.1.192" DEFAULT_SPEED: "30" @@ -90,3 +91,12 @@ curl -X POST http://localhost:8080/api/move_to_pose \ - `status` - JSON: `{ok, action, message, timestamp_ns}` - `joint_pose` - 6 joint angles in degrees - `tcp_pose` - `[x, y, z, roll, pitch, yaw]` in mm/deg + +## Dora Inputs + +- `command` - JSON string: `{id, action, payload}` + - `id`: unique command id (required) + - `action`: `home`, `move_to`, or `move_to_pose` + - `payload`: fields for move (`x`, `y`, `z`, `roll`, `pitch`, `yaw`, `speed`) + +`status` will include `command_id` for command responses. diff --git a/dora_ulite6/dora_ulite6/main.py b/dora_ulite6/dora_ulite6/main.py index 900d9ed..6615165 100644 --- a/dora_ulite6/dora_ulite6/main.py +++ b/dora_ulite6/dora_ulite6/main.py @@ -732,16 +732,55 @@ def _send_tcp_pose(node: Node, tcp_pose: Dict[str, float]) -> None: node.send_output("tcp_pose", pa.array(vec), metadata=metadata) -def _send_error_status(node: Node, action: str, message: str) -> None: +def _send_error_status( + node: Node, action: str, message: str, status: Optional[Dict[str, Any]] = None +) -> None: payload = { "ok": False, "action": action, "message": message, "timestamp_ns": time.time_ns(), } + if status: + payload.update(status) node.send_output("status", pa.array([json.dumps(payload)])) +def _send_command_status( + node: Node, + command_id: str, + action: str, + ok: bool, + message: str, + code: Optional[int] = None, + status: Optional[Dict[str, Any]] = None, +) -> None: + payload = { + "ok": ok, + "action": action, + "message": message, + "timestamp_ns": time.time_ns(), + "command_id": command_id, + } + if code is not None: + payload["code"] = code + if status: + payload.update(status) + node.send_output("status", pa.array([json.dumps(payload)])) + + +def _status_snapshot(helper: ULite6Helper) -> Dict[str, Any]: + status = helper.get_status() + return { + "state": status.get("state"), + 
"mode": status.get("mode"), + "error_code": status.get("error_code"), + "warn_code": status.get("warn_code"), + "has_error": status.get("has_error"), + "has_warn": status.get("has_warn"), + } + + def main() -> None: node = Node() @@ -773,7 +812,107 @@ def main() -> None: _send_joint_pose(node, joints) _send_tcp_pose(node, tcp_pose) except Exception as exc: - _send_error_status(node, "publish_state", str(exc)) + _send_error_status( + node, "publish_state", str(exc), status=_status_snapshot(helper) + ) + elif event["id"] == "command": + try: + value = event["value"] + raw = value[0].as_py() if len(value) else None + if not raw: + _send_command_status( + node, + command_id="", + action="command", + ok=False, + message="Empty command payload", + status=_status_snapshot(helper), + ) + continue + payload = json.loads(raw) + except Exception as exc: + _send_command_status( + node, + command_id="", + action="command", + ok=False, + message=f"Invalid command payload: {exc}", + status=_status_snapshot(helper), + ) + continue + + command_id = str(payload.get("id", "")) + action = str(payload.get("action", "")) + data = payload.get("payload", {}) if isinstance(payload.get("payload", {}), dict) else {} + + if not command_id: + _send_command_status( + node, + command_id="", + action=action or "command", + ok=False, + message="Missing command id", + status=_status_snapshot(helper), + ) + continue + + try: + if action == "home": + code = helper.go_home() + _send_command_status( + node, + command_id=command_id, + action=action, + ok=code == 0, + message="Home command executed", + code=code, + status=_status_snapshot(helper), + ) + elif action in ("move_to", "move_to_pose"): + speed = float(data.get("speed", default_speed)) + x = float(data.get("x", 0.0)) + y = float(data.get("y", 0.0)) + z = float(data.get("z", 0.0)) + roll = float(data.get("roll", 180.0)) + pitch = float(data.get("pitch", 0.0)) + yaw = float(data.get("yaw", 0.0)) + code = helper.move_to_pose( + x, + y, + z, + 
roll, + pitch, + yaw, + speed, + default_units, + ) + _send_command_status( + node, + command_id=command_id, + action=action, + ok=code == 0, + message="Move command executed", + code=code, + status=_status_snapshot(helper), + ) + else: + _send_command_status( + node, + command_id=command_id, + action=action or "command", + ok=False, + message=f"Unknown action: {action}", + status=_status_snapshot(helper), + ) + except Exception as exc: + _send_command_status( + node, + command_id=command_id, + action=action or "command", + ok=False, + message=str(exc), + status=_status_snapshot(helper), + ) if __name__ == "__main__": diff --git a/dora_zed_cpp/DoraTargets.cmake b/dora_zed_cpp/DoraTargets.cmake index 3a54e3d..836f09c 100644 --- a/dora_zed_cpp/DoraTargets.cmake +++ b/dora_zed_cpp/DoraTargets.cmake @@ -1,11 +1,11 @@ -set(DORA_ROOT_DIR "/home/cristhian/workspace/garbage/dora" CACHE FILEPATH "Path to the root of dora") +set(DORA_ROOT_DIR "" CACHE FILEPATH "Path to the root of dora") set(dora_c_include_dir "${CMAKE_CURRENT_BINARY_DIR}/include/c") set(dora_cxx_include_dir "${CMAKE_CURRENT_BINARY_DIR}/include/cxx") set(node_bridge "${CMAKE_CURRENT_BINARY_DIR}/node_bridge.cc") -if(DORA_ROOT_DIR) +if(DORA_ROOT_DIR AND EXISTS "${DORA_ROOT_DIR}/Cargo.toml") include(ExternalProject) ExternalProject_Add(Dora SOURCE_DIR ${DORA_ROOT_DIR} @@ -40,10 +40,28 @@ if(DORA_ROOT_DIR) set(dora_link_dirs ${DORA_ROOT_DIR}/target/debug) else() include(ExternalProject) + set(DORA_GIT_TAG "main") + find_program(DORA_CLI dora) + if(DORA_CLI) + execute_process( + COMMAND ${DORA_CLI} --version + RESULT_VARIABLE _dora_version_result + OUTPUT_VARIABLE _dora_version_out + ERROR_VARIABLE _dora_version_err + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + if(_dora_version_result EQUAL 0) + string(REGEX MATCH "([0-9]+\\.[0-9]+\\.[0-9]+)" _dora_version_match "${_dora_version_out}") + if(_dora_version_match) + set(DORA_GIT_TAG "v${CMAKE_MATCH_1}") + endif() + endif() + endif() + ExternalProject_Add(Dora PREFIX 
${CMAKE_CURRENT_BINARY_DIR}/dora
     GIT_REPOSITORY https://github.com/dora-rs/dora.git
-    GIT_TAG main
+    GIT_TAG ${DORA_GIT_TAG}
     BUILD_IN_SOURCE True
     CONFIGURE_COMMAND ""
     BUILD_COMMAND
diff --git a/dora_zed_cpp/main.cc b/dora_zed_cpp/main.cc
index 245048e..89927fa 100644
--- a/dora_zed_cpp/main.cc
+++ b/dora_zed_cpp/main.cc
@@ -189,6 +189,7 @@ int main() {
 
   auto [fx, fy, cx, cy, distortion] = reload_calibration();
 
+  int64_t frame_id = 0;
   while (true) {
     auto event = dora_node.events->next();
     auto type = event_type(event);
@@ -237,6 +238,12 @@ int main() {
       image_metadata->set_float("cx", cx);
       image_metadata->set_float("cy", cy);
       image_metadata->set_list_float("distortion", make_distortion(distortion));
+      image_metadata->set_int("frame_id", frame_id++);
+      image_metadata->set_int("timestamp_ns", static_cast<int64_t>(
+          std::chrono::duration_cast<std::chrono::nanoseconds>(
+              std::chrono::steady_clock::now().time_since_epoch()
+          ).count()
+      ));
       auto image_result = send_output_with_metadata(
           dora_node.send_output, "image_bgr", image_slice, std::move(image_metadata));