First commit

This commit is contained in:
Cristhian Aguilera
2026-01-28 16:21:37 -03:00
parent 2da5baf508
commit 610c43e16d
17 changed files with 2113 additions and 2 deletions

295
.gitignore vendored Normal file
View File

@@ -0,0 +1,295 @@
# Created by https://www.toptal.com/developers/gitignore/api/osx,windows,linux,c++,python,rust
# Edit at https://www.toptal.com/developers/gitignore?templates=osx,windows,linux,c++,python,rust
out/
### C++ ###
# Prerequisites
*.d
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
*.dll
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
### Linux ###
*~
# temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*
# KDE directory preferences
.directory
# Linux trash folder which might appear on any partition or disk
.Trash-*
# .nfs files are created when an open file is removed but is still being accessed
.nfs*
### OSX ###
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml
# ruff
.ruff_cache/
# LSP config files
pyrightconfig.json
### Rust ###
# Generated by Cargo
# will have compiled files and executables
debug/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
### Windows ###
# Windows thumbnail cache files
Thumbs.db
Thumbs.db:encryptable
ehthumbs.db
ehthumbs_vista.db
# Dump file
*.stackdump
# Folder config file
[Dd]esktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msix
*.msm
*.msp
# Windows shortcuts
*.lnk
# End of https://www.toptal.com/developers/gitignore/api/osx,windows,linux,c++,python,rust

View File

@@ -1,2 +1,29 @@
# Littlehand

Littlehand

## Getting started
- Install it with:
```bash
uv venv -p 3.12 --seed
dora build [dataflow.yml] --uv
```
- Run it with:
```bash
dora run [dataflow.yml] --uv
```
## Dataflows
| Dataflow | Description |
|----------|-------------|
| `dataflow_ulite6.yml` | Ufactory ULite6 robot control with web UI|
| `dataflow_zed_cpp.yml` | ZED camera capture with image viewer |
## Nodes
| Node | Description |
|------|-------------|
| `ulite6` | UFactory Lite6 robot controller with REST API and web UI |
| `zed_camera_cpp` | ZED stereo camera capture (C++) |
| `image_viewer` | Display images from Dora stream |

17
dataflow_ulite6.yml Normal file
View File

@@ -0,0 +1,17 @@
nodes:
  - id: ulite6
    # Install the node package into the active environment before running.
    build: uv pip install -e dora_ulite6
    path: dora_ulite6/dora_ulite6/main.py
    inputs:
      tick: dora/timer/millis/10
    outputs:
      - status
      - joint_pose
      - tcp_pose
    env:
      ROBOT_IP: "192.168.1.192"
      DEFAULT_SPEED: "30"
      DEFAULT_UNITS: "mm"
      API_HOST: "0.0.0.0"
      API_PORT: "8080"
      VACUUM_ENABLED: "true"

30
dataflow_zed_cpp.yml Normal file
View File

@@ -0,0 +1,30 @@
nodes:
  - id: zed_camera_cpp
    # Configure and build the C++ node with CMake before the dataflow starts.
    build: bash -lc "cmake -S dora_zed_cpp -B dora_zed_cpp/build && cmake --build dora_zed_cpp/build"
    path: dora_zed_cpp/build/dora_zed_cpp
    env:
      ZED_RESOLUTION: "720"
      ZED_FPS: "15"
      ZED_DEPTH_MODE: "NEURAL"
      ZED_DEPTH_MIN_MM: "10"
      ZED_DEPTH_MAX_MM: "500"
      ZED_DEPTH_FILL: "false"
      ZED_FLIP: "ON"
      ZED_WARMUP_FRAMES: "30"
    inputs:
      tick: dora/timer/millis/100
    outputs:
      - image_bgr
      - point_cloud
      - camera_info
  - id: image_viewer
    build: |
      uv venv -p 3.12 --seed --allow-existing
      uv pip install -e dora_image_viewer
    path: dora_image_viewer/dora_image_viewer/main.py
    env:
      VIRTUAL_ENV: ./.venv
      PLOT_WIDTH: 1280
      PLOT_HEIGHT: 720
    inputs:
      image: zed_camera_cpp/image_bgr

View File

@@ -0,0 +1,95 @@
# Dora Node for plotting data with OpenCV
This node is used to plot a text and a list of bbox on a base image (ideal for object detection).
# YAML
```yaml
- id: dora-image-viewer
build: pip install ../../dora_image_viewer
path: dora_image_viewer/dora_image_viewer/main.py
inputs:
# image: Arrow array of size 1 containing the base image
# bbox: Arrow array of bbox
# text: Arrow array of size 1 containing the text to be plotted
env:
PLOT_WIDTH: 640 # optional, default is image input width
PLOT_HEIGHT: 480 # optional, default is image input height
```
# Inputs
- `image`: Arrow array containing the base image
```python
## Image data
image_data: UInt8Array # Example: pa.array(img.ravel())
metadata = {
"width": 640,
"height": 480,
"shape": [480, 640, 3], # optional alternative to width/height
"encoding": str, # bgr8, rgb8
}
## Example
node.send_output(
image_data, {"width": 640, "height": 480, "encoding": "bgr8"}
)
## Decoding
storage = event["value"]
metadata = event["metadata"]
encoding = metadata["encoding"]
width = metadata.get("width")
height = metadata.get("height")
shape = metadata.get("shape")
if encoding == "bgr8":
channels = 3
storage_type = np.uint8
frame = (
storage.to_numpy()
.astype(storage_type)
.reshape((height, width, channels))
)
```
- `bbox`: an arrow array containing the bounding boxes, confidence scores, and class names of the detected objects
```Python
bbox: {
"bbox": np.array, # flattened array of bounding boxes
"conf": np.array, # flat array of confidence scores
"labels": np.array, # flat array of class names
}
encoded_bbox = pa.array([bbox], {"format": "xyxy"})
decoded_bbox = {
"bbox": encoded_bbox[0]["bbox"].values.to_numpy().reshape(-1, 4),
"conf": encoded_bbox[0]["conf"].values.to_numpy(),
"labels": encoded_bbox[0]["labels"].values.to_numpy(zero_copy_only=False),
}
```
- `text`: Arrow array containing the text to be plotted
```python
text: str
encoded_text = pa.array([text])
decoded_text = encoded_text[0].as_py()
```
## Example
Check example at [examples/python-dataflow](examples/python-dataflow)
## License
This project is licensed under Apache-2.0. Check out [NOTICE.md](../../NOTICE.md) for more information.

View File

@@ -0,0 +1,13 @@
"""TODO: Add docstring."""
import os
# Define the path to the README file relative to the package directory
readme_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "README.md")
# Read the content of the README file
try:
with open(readme_path, encoding="utf-8") as f:
__doc__ = f.read()
except FileNotFoundError:
__doc__ = "README file not found."

View File

@@ -0,0 +1,250 @@
"""TODO: Add docstring."""
import argparse
import io
import os
import cv2
import numpy as np
import pyarrow as pa
from dora import Node
from PIL import (
Image,
)
if True:
import pillow_avif # noqa # noqa
RUNNER_CI = True if os.getenv("CI") == "true" else False
class Plot:
    """Per-node rendering state: the current frame plus overlay data.

    The original version declared mutable class attributes (ndarrays and a
    dict), which are shared across all instances; they are now instance
    attributes initialized in ``__init__``. ``Plot()`` construction and all
    attribute names are unchanged, so callers are unaffected.
    """

    def __init__(self):
        # Latest decoded image (BGR); empty until the first `image` event.
        self.frame: np.ndarray = np.array([])
        # Detections to draw: xyxy boxes, confidence scores, class labels.
        self.bboxes: dict = {
            "bbox": np.array([]),
            "conf": np.array([]),
            "labels": np.array([]),
        }
        # Text overlay drawn in the top-left corner of the frame.
        self.text: str = ""
        # Optional output size; None keeps the input resolution.
        self.width = None
        self.height = None
def plot_frame(plot):
    """Draw bounding boxes and the overlay text onto plot.frame, then show it.

    Boxes are drawn in green with a "label, confidence" caption near each
    box's bottom-right corner; plot.text is rendered in white at the top
    left. The frame is resized when both plot.width and plot.height are set,
    and displayed with OpenCV unless running under CI.
    """
    green = (0, 255, 0)
    boxes = plot.bboxes
    for corners, confidence, label in zip(boxes["bbox"], boxes["conf"], boxes["labels"]):
        min_x, min_y, max_x, max_y = corners
        top_left = (int(min_x), int(min_y))
        bottom_right = (int(max_x), int(max_y))
        cv2.rectangle(plot.frame, top_left, bottom_right, green, 2)
        caption = f"{label}, {confidence:0.2f}"
        caption_origin = (int(max_x) - 120, int(max_y) - 10)
        cv2.putText(
            plot.frame,
            caption,
            caption_origin,
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            green,
            1,
            1,
        )
    # Overlay text in the top-left corner, white.
    cv2.putText(
        plot.frame,
        plot.text,
        (20, 20),
        cv2.FONT_HERSHEY_SIMPLEX,
        0.5,
        (255, 255, 255),
        1,
        1,
    )
    if plot.width is not None and plot.height is not None:
        plot.frame = cv2.resize(plot.frame, (plot.width, plot.height))
    # Only open a window outside CI, and only for real (H, W, C) images.
    if not RUNNER_CI and len(plot.frame.shape) >= 3:
        cv2.imshow("Dora Node: dora-image-viewer", plot.frame)
def yuv420p_to_bgr_opencv(yuv_array, width, height):
    """Convert a flat I420 (YUV420 planar) buffer to a BGR image.

    The buffer is truncated to the expected I420 size (width * height * 3/2
    bytes) and reshaped to the (height * 3/2, width) layout cv2.cvtColor
    expects for planar YUV.

    Bug fix: the previous code used cv2.COLOR_YUV420p2RGB despite the
    function's name (and the BGR convention of the cv2.imshow pipeline that
    consumes the result), which swapped the red and blue channels.
    """
    yuv_array = yuv_array[: width * height * 3 // 2]
    yuv = yuv_array.reshape((height * 3 // 2, width))
    return cv2.cvtColor(yuv, cv2.COLOR_YUV420p2BGR)
def main():
    # Handle dynamic nodes, ask for the name of the node in the dataflow, and the same values as the ENV variables.
    """Run the image-viewer node event loop.

    Reads the node name and optional plot size from CLI arguments (the
    PLOT_WIDTH/PLOT_HEIGHT environment variables take precedence), connects
    to the dataflow, then processes events: decodes `image` inputs
    (bgr8/rgb8/compressed/yuv420/avif), stores `bbox` and `text` inputs on a
    shared Plot object, and renders via plot_frame. Exits when the viewer
    window receives `q` (outside CI) or the event stream ends.

    Raises:
        KeyError: if an image event has neither width/height nor shape metadata.
        RuntimeError: on unsupported image encodings or bbox formats, or when
            the dataflow reports an ERROR event.
    """
    parser = argparse.ArgumentParser(
        description="OpenCV Plotter: This node is used to plot text and bounding boxes on an image.",
    )
    parser.add_argument(
        "--name",
        type=str,
        required=False,
        help="The name of the node in the dataflow.",
        default="dora-image-viewer",
    )
    parser.add_argument(
        "--plot-width",
        type=int,
        required=False,
        help="The width of the plot.",
        default=None,
    )
    parser.add_argument(
        "--plot-height",
        type=int,
        required=False,
        help="The height of the plot.",
        default=None,
    )
    args = parser.parse_args()
    # Environment variables win over CLI values; env values arrive as strings
    # and are converted to int below when numeric.
    plot_width = os.getenv("PLOT_WIDTH", args.plot_width)
    plot_height = os.getenv("PLOT_HEIGHT", args.plot_height)
    if plot_width is not None:
        if isinstance(plot_width, str) and plot_width.isnumeric():
            plot_width = int(plot_width)
    if plot_height is not None:
        if isinstance(plot_height, str) and plot_height.isnumeric():
            plot_height = int(plot_height)
    node = Node(
        args.name,
    )  # provide the name to connect to the dataflow if dynamic node
    plot = Plot()
    plot.width = plot_width
    plot.height = plot_height
    pa.array([])  # initialize pyarrow array
    for event in node:
        event_type = event["type"]
        if event_type == "INPUT":
            event_id = event["id"]
            if event_id == "image":
                storage = event["value"]
                metadata = event["metadata"]
                encoding = metadata["encoding"].lower()
                # Image size may come as explicit width/height keys or as a
                # (H, W, ...) shape list; fall back from one to the other.
                width = metadata.get("width")
                height = metadata.get("height")
                if (width is None or height is None) and "shape" in metadata:
                    shape = metadata["shape"]
                    if isinstance(shape, (list, tuple)) and len(shape) >= 2:
                        height = height if height is not None else int(shape[0])
                        width = width if width is not None else int(shape[1])
                if width is None or height is None:
                    raise KeyError("width/height (or shape) missing from metadata")
                if encoding == "bgr8":
                    channels = 3
                    storage_type = np.uint8
                    plot.frame = (
                        storage.to_numpy()
                        .astype(storage_type)
                        .reshape((height, width, channels))
                        .copy()  # Copy So that we can add annotation on the image
                    )
                elif encoding == "rgb8":
                    channels = 3
                    storage_type = np.uint8
                    frame = (
                        storage.to_numpy()
                        .astype(storage_type)
                        .reshape((height, width, channels))
                    )
                    plot.frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                elif encoding in ["jpeg", "jpg", "jpe", "bmp", "webp", "png"]:
                    # Compressed formats: let OpenCV decode straight to BGR.
                    channels = 3
                    storage_type = np.uint8
                    storage = storage.to_numpy()
                    plot.frame = cv2.imdecode(storage, cv2.IMREAD_COLOR)
                elif encoding == "yuv420":
                    storage = storage.to_numpy()
                    # Convert back to BGR results in more saturated image.
                    channels = 3
                    storage_type = np.uint8
                    img_bgr_restored = yuv420p_to_bgr_opencv(storage, width, height)
                    plot.frame = img_bgr_restored
                elif encoding == "avif":
                    # Convert AVIF to RGB (decoded via the pillow-avif plugin).
                    array = storage.to_numpy()
                    bytes = array.tobytes()  # NOTE(review): shadows the builtin `bytes`
                    img = Image.open(io.BytesIO(bytes))
                    img = img.convert("RGB")
                    plot.frame = np.array(img)
                    plot.frame = cv2.cvtColor(plot.frame, cv2.COLOR_RGB2BGR)
                else:
                    raise RuntimeError(f"Unsupported image encoding: {encoding}")
                plot_frame(plot)
                # Pump the OpenCV GUI loop; `q` closes the viewer (skip in CI).
                if not RUNNER_CI:
                    if cv2.waitKey(1) & 0xFF == ord("q"):
                        break
            elif event_id == "bbox":
                arrow_bbox = event["value"][0]
                bbox_format = event["metadata"]["format"].lower()
                if bbox_format == "xyxy":
                    bbox = arrow_bbox["bbox"].values.to_numpy().reshape(-1, 4)
                elif bbox_format == "xywh":
                    # Convert center-based xywh boxes to corner-based xyxy.
                    original_bbox = arrow_bbox["bbox"].values.to_numpy().reshape(-1, 4)
                    bbox = np.array(
                        [
                            (
                                x - w / 2,
                                y - h / 2,
                                x + w / 2,
                                y + h / 2,
                            )
                            for [x, y, w, h] in original_bbox
                        ],
                    )
                else:
                    raise RuntimeError(f"Unsupported bbox format: {bbox_format}")
                plot.bboxes = {
                    "bbox": bbox,
                    "conf": arrow_bbox["conf"].values.to_numpy(),
                    "labels": arrow_bbox["labels"].values.to_numpy(
                        zero_copy_only=False,
                    ),
                }
            elif event_id == "text":
                plot.text = event["value"][0].as_py()
        elif event_type == "ERROR":
            raise RuntimeError(event["error"])


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,37 @@
[project]
name = "dora-image-viewer"
version = "0.4.1"
license = { file = "MIT" }
authors = [
{ name = "Haixuan Xavier Tao", email = "tao.xavier@outlook.com" },
{ name = "Enzo Le Van", email = "dev@enzo-le-van.fr" },
]
description = "Dora Node for plotting text and bbox on image with OpenCV"
requires-python = ">=3.8"
dependencies = [
"dora-rs >= 0.3.9",
"numpy < 2.0.0",
"opencv-python >= 4.1.1",
"pillow-avif-plugin>=1.5.1",
"pillow>=10.4.0",
]
[dependency-groups]
dev = ["pytest >=8.1.1", "ruff >=0.9.1"]
[project.scripts]
dora-image-viewer = "dora_image_viewer.main:main"
[tool.ruff.lint]
extend-select = [
"D", # pydocstyle
"UP", # Ruff's UP rule
"PERF", # Ruff's PERF rule
"RET", # Ruff's RET rule
"RSE", # Ruff's RSE rule
"NPY", # Ruff's NPY rule
"N", # Ruff's N rule
"I", # Ruff's I rule
]

View File

@@ -0,0 +1,12 @@
"""TODO: Add docstring."""
import pytest
def test_import_main():
"""TODO: Add docstring."""
from dora_image_viewer.main import main
# Check that everything is working, and catch dora Runtime Exception as we're not running in a dora dataflow.
with pytest.raises(RuntimeError):
main()

92
dora_ulite6/README.md Normal file
View File

@@ -0,0 +1,92 @@
# Dora ULite6 Node
Dora node for controlling a UFactory Lite6 robot via REST API, web UI, and publishing joint/TCP state.
## Dataflow
```yaml
- id: ulite6
build: uv pip install -e dora_ulite6
path: dora_ulite6/dora_ulite6/main.py
inputs:
tick: dora/timer/millis/10
outputs: [status, joint_pose, tcp_pose]
env:
ROBOT_IP: "192.168.1.192"
DEFAULT_SPEED: "30"
DEFAULT_UNITS: "mm"
API_HOST: "0.0.0.0"
API_PORT: "8080"
VACUUM_ENABLED: "false"
```
## Configuration
| Env Variable | Default | Description |
|--------------|---------|-------------|
| `ROBOT_IP` | `192.168.1.192` | Robot IP address |
| `DEFAULT_SPEED` | `30` | Movement speed (mm/s) |
| `DEFAULT_UNITS` | `mm` | Position units (mm or m) |
| `API_HOST` | `0.0.0.0` | API server host |
| `API_PORT` | `8080` | API server port |
| `VACUUM_ENABLED` | `false` | Enable vacuum gripper controls |
## Web UI
Access at `http://localhost:8080/`
- Live status, TCP pose, and joint angles
- Home and Reset buttons
- Move to position form
- Vacuum gripper controls (when enabled)
## REST API
Interactive docs at `http://localhost:8080/docs`
| Method | Endpoint | Description |
|--------|----------|-------------|
| GET | `/` | Web control interface |
| GET | `/api/status` | Robot status (connected, errors) |
| GET | `/api/pose` | Current TCP pose |
| GET | `/api/joints` | Current joint angles |
| GET | `/api/config` | API configuration |
| POST | `/api/home` | Go to home position |
| POST | `/api/reset` | Clear errors and reset state |
| POST | `/api/move_to` | Move to position |
| POST | `/api/move_to_pose` | Move to full pose |
| POST | `/api/disconnect` | Disconnect from robot |
### Vacuum Gripper (when `VACUUM_ENABLED=true`)
| Method | Endpoint | Description |
|--------|----------|-------------|
| GET | `/api/vacuum` | Get vacuum status |
| POST | `/api/vacuum/on` | Turn vacuum on |
| POST | `/api/vacuum/off` | Turn vacuum off |
## CLI Examples
```bash
# Get pose
curl http://localhost:8080/api/pose
# Go home
curl -X POST http://localhost:8080/api/home
# Move to position
curl -X POST http://localhost:8080/api/move_to \
-H "Content-Type: application/json" \
-d '{"x": 200, "y": 0, "z": 300}'
# Move with orientation
curl -X POST http://localhost:8080/api/move_to_pose \
-H "Content-Type: application/json" \
-d '{"x": 200, "y": 0, "z": 300, "roll": 180, "pitch": 0, "yaw": 0}'
```
## Dora Outputs
- `status` - JSON: `{ok, action, message, timestamp_ns}`
- `joint_pose` - 6 joint angles in degrees
- `tcp_pose` - `[x, y, z, roll, pitch, yaw]` in mm/deg

View File

@@ -0,0 +1 @@
"""Dora ULite6 node package."""

View File

@@ -0,0 +1,780 @@
"""Dora node for controlling a UFactory Lite6 robot and publishing state."""
import json
import os
import threading
import time
from typing import Any, Dict, Optional
import numpy as np
import pyarrow as pa
import uvicorn
from dora import Node
from fastapi import FastAPI, HTTPException
from fastapi.responses import HTMLResponse
from pydantic import BaseModel
from scipy.spatial.transform import Rotation
from xarm.wrapper import XArmAPI
class ULite6Helper:
    """Minimal ULite6 helper based on ufactory-control.

    Thin wrapper around the xArm SDK (XArmAPI) for a UFactory Lite6:
    connection, homing, Cartesian moves, pose/joint queries, and the vacuum
    gripper. Methods returning `int` pass through SDK result codes
    (0 = success per the xArm SDK convention).
    """

    def __init__(self, robot_ip: str):
        # Connect and enable motion immediately so the arm is ready for commands.
        self.robot_ip = robot_ip
        self.arm = XArmAPI(self.robot_ip)
        self.arm.connect()
        self.arm.motion_enable(enable=True)

    def go_home(self) -> int:
        """Blocking move to the home position; returns the SDK result code."""
        # Position mode (0) and ready state (0) are required before motion.
        self.arm.set_mode(0)
        self.arm.set_state(state=0)
        code = self.arm.move_gohome(wait=True)
        # move_gohome may return tuple (code, result) in some versions
        if isinstance(code, tuple):
            code = code[0]
        return code

    def move_to_pose(
        self,
        x: float,
        y: float,
        z: float,
        roll: float,
        pitch: float,
        yaw: float,
        speed: float = 30.0,
        units: str = "mm",
    ) -> int:
        """Blocking Cartesian move; position in `units` ("mm" or "m"), angles in degrees."""
        if units == "m":
            # The SDK expects millimetres; convert metres when requested.
            x, y, z = x * 1000.0, y * 1000.0, z * 1000.0
        self.arm.set_mode(0)
        self.arm.set_state(0)
        return self.arm.set_position(
            x, y, z, roll=roll, pitch=pitch, yaw=yaw, speed=speed, wait=True
        )

    def get_current_position(self) -> Dict[str, float]:
        """Return the TCP pose as x/y/z (mm) and roll/pitch/yaw (deg).

        Raises RuntimeError when the SDK reports a non-zero code.
        """
        code, pos = self.arm.get_position()
        if code != 0:
            raise RuntimeError(f"get_position failed with code {code}")
        return {
            "x": pos[0],
            "y": pos[1],
            "z": pos[2],
            "roll": pos[3],
            "pitch": pos[4],
            "yaw": pos[5],
        }

    def get_pose_matrix(self, tcp_offset_mm: float = 0.0) -> np.ndarray:
        """Return the 4x4 homogeneous TCP pose in metres, optionally offset along tool Z."""
        pos = self.get_current_position()
        R = Rotation.from_euler(
            "xyz", [pos["roll"], pos["pitch"], pos["yaw"]], degrees=True
        ).as_matrix()
        position_m = np.array([pos["x"], pos["y"], pos["z"]]) / 1000.0
        if tcp_offset_mm != 0.0:
            # The offset is along the tool's local Z axis, rotated into the base frame.
            tcp_offset_local = np.array([0, 0, tcp_offset_mm]) / 1000.0
            tcp_offset_global = R @ tcp_offset_local
            position_m = position_m + tcp_offset_global
        T = np.eye(4)
        T[:3, :3] = R
        T[:3, 3] = position_m
        return T

    def get_joint_angles_deg(self) -> np.ndarray:
        """Return the joint angles in degrees; raises RuntimeError on SDK failure."""
        code, angles = self.arm.get_servo_angle(is_radian=False)
        if code != 0:
            raise RuntimeError(f"get_servo_angle failed with code {code}")
        return np.array(angles, dtype=float)

    def get_status(self) -> Dict[str, Any]:
        """Get robot status information."""
        return {
            "connected": self.arm.connected,
            "state": self.arm.state,
            "mode": self.arm.mode,
            "error_code": self.arm.error_code,
            "warn_code": self.arm.warn_code,
            "has_error": self.arm.has_error,
            "has_warn": self.arm.has_warn,
            "motor_enable_states": self.arm.motor_enable_states,
        }

    def reset_state(self) -> int:
        """Clear errors/warnings and reset robot state."""
        self.arm.clean_error()
        self.arm.clean_warn()
        self.arm.motion_enable(enable=True)
        self.arm.set_mode(0)
        return self.arm.set_state(0)

    def set_vacuum_gripper(self, on: bool) -> int:
        """Turn vacuum gripper on or off."""
        code = self.arm.set_vacuum_gripper(on)
        # Some SDK versions return (code, result); keep only the code.
        if isinstance(code, tuple):
            code = code[0]
        return code

    def get_vacuum_gripper(self) -> Dict[str, Any]:
        """Get vacuum gripper status."""
        code, status = self.arm.get_vacuum_gripper()
        return {"code": code, "on": status == 1}

    def disconnect(self) -> int:
        """Close the SDK connection to the robot."""
        return self.arm.disconnect()
# --- Pydantic models for API requests ---
class MoveToRequest(BaseModel):
    """Body for POST /api/move_to: target position with optional orientation/speed.

    Units follow the server's DEFAULT_UNITS configuration (mm by default).
    """

    x: float  # target X position
    y: float  # target Y position
    z: float  # target Z position
    speed: Optional[float] = None  # movement speed; server default used when None
    roll: Optional[float] = 180.0  # default orientation points the tool downward
    pitch: Optional[float] = 0.0
    yaw: Optional[float] = 0.0
class MoveToPoseRequest(BaseModel):
    """Body for POST /api/move_to_pose: full pose, position plus orientation.

    All orientation fields are required here, unlike MoveToRequest.
    """

    x: float  # target X position
    y: float  # target Y position
    z: float  # target Z position
    roll: float  # orientation in degrees
    pitch: float
    yaw: float
    speed: Optional[float] = None  # movement speed; server default used when None
# --- FastAPI app factory ---
def create_api(
helper: ULite6Helper, default_speed: float, default_units: str, vacuum_enabled: bool
) -> FastAPI:
"""Create FastAPI application with robot control endpoints."""
app = FastAPI(
title="ULite6 Robot Control API",
description="REST API for controlling UFactory Lite6 robot",
version="0.1.0",
)
@app.get("/", response_class=HTMLResponse)
def web_ui():
"""Serve web control interface."""
return """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>ULite6 Control</title>
<style>
* { box-sizing: border-box; margin: 0; padding: 0; }
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
background: #1a1a2e;
color: #eee;
min-height: 100vh;
padding: 20px;
}
h1 {
text-align: center;
color: #00d4ff;
margin-bottom: 20px;
font-size: 1.8rem;
}
.container {
max-width: 800px;
margin: 0 auto;
display: grid;
gap: 16px;
}
.card {
background: #16213e;
border-radius: 8px;
padding: 16px;
border: 1px solid #0f3460;
}
.card h2 {
color: #00d4ff;
font-size: 1rem;
margin-bottom: 12px;
border-bottom: 1px solid #0f3460;
padding-bottom: 8px;
}
.status-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(120px, 1fr));
gap: 8px;
}
.status-item {
background: #0f3460;
padding: 8px;
border-radius: 4px;
text-align: center;
}
.status-item .label {
font-size: 0.75rem;
color: #888;
margin-bottom: 4px;
}
.status-item .value {
font-size: 0.9rem;
font-weight: 600;
}
.status-item .value.ok { color: #4ade80; }
.status-item .value.error { color: #f87171; }
.pose-grid {
display: grid;
grid-template-columns: repeat(6, 1fr);
gap: 8px;
}
.pose-item {
background: #0f3460;
padding: 8px;
border-radius: 4px;
text-align: center;
}
.pose-item .label {
font-size: 0.7rem;
color: #888;
}
.pose-item .value {
font-size: 0.85rem;
font-family: monospace;
}
.joints-grid {
display: grid;
grid-template-columns: repeat(6, 1fr);
gap: 8px;
margin-top: 12px;
}
.btn-group {
display: flex;
gap: 10px;
flex-wrap: wrap;
}
button {
background: #0f3460;
color: #fff;
border: 1px solid #00d4ff;
padding: 10px 20px;
border-radius: 4px;
cursor: pointer;
font-size: 0.9rem;
transition: all 0.2s;
}
button:hover { background: #00d4ff; color: #1a1a2e; }
button:disabled { opacity: 0.5; cursor: not-allowed; }
button.danger { border-color: #f87171; }
button.danger:hover { background: #f87171; }
.form-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(100px, 1fr));
gap: 10px;
margin-bottom: 12px;
}
.form-group {
display: flex;
flex-direction: column;
}
.form-group label {
font-size: 0.75rem;
color: #888;
margin-bottom: 4px;
}
.form-group input {
background: #0f3460;
border: 1px solid #16213e;
color: #fff;
padding: 8px;
border-radius: 4px;
font-size: 0.9rem;
}
.form-group input:focus {
outline: none;
border-color: #00d4ff;
}
#log {
background: #0a0a15;
border-radius: 4px;
padding: 12px;
font-family: monospace;
font-size: 0.8rem;
max-height: 150px;
overflow-y: auto;
}
.log-entry {
padding: 4px 0;
border-bottom: 1px solid #16213e;
}
.log-entry:last-child { border-bottom: none; }
.log-entry.success { color: #4ade80; }
.log-entry.error { color: #f87171; }
.log-entry .time { color: #666; margin-right: 8px; }
</style>
</head>
<body>
<h1>ULite6 Robot Control</h1>
<div class="container">
<div class="card">
<h2>Status</h2>
<div id="status" class="status-grid">
<div class="status-item">
<div class="label">Connected</div>
<div class="value" id="st-connected">--</div>
</div>
<div class="status-item">
<div class="label">State</div>
<div class="value" id="st-state">--</div>
</div>
<div class="status-item">
<div class="label">Mode</div>
<div class="value" id="st-mode">--</div>
</div>
<div class="status-item">
<div class="label">Error</div>
<div class="value" id="st-error">--</div>
</div>
<div class="status-item">
<div class="label">Warning</div>
<div class="value" id="st-warn">--</div>
</div>
</div>
</div>
<div class="card">
<h2>TCP Pose (mm, deg)</h2>
<div class="pose-grid">
<div class="pose-item"><div class="label">X</div><div class="value" id="pose-x">--</div></div>
<div class="pose-item"><div class="label">Y</div><div class="value" id="pose-y">--</div></div>
<div class="pose-item"><div class="label">Z</div><div class="value" id="pose-z">--</div></div>
<div class="pose-item"><div class="label">Roll</div><div class="value" id="pose-roll">--</div></div>
<div class="pose-item"><div class="label">Pitch</div><div class="value" id="pose-pitch">--</div></div>
<div class="pose-item"><div class="label">Yaw</div><div class="value" id="pose-yaw">--</div></div>
</div>
<h2 style="margin-top: 16px;">Joint Angles (deg)</h2>
<div class="joints-grid">
<div class="pose-item"><div class="label">J1</div><div class="value" id="j0">--</div></div>
<div class="pose-item"><div class="label">J2</div><div class="value" id="j1">--</div></div>
<div class="pose-item"><div class="label">J3</div><div class="value" id="j2">--</div></div>
<div class="pose-item"><div class="label">J4</div><div class="value" id="j3">--</div></div>
<div class="pose-item"><div class="label">J5</div><div class="value" id="j4">--</div></div>
<div class="pose-item"><div class="label">J6</div><div class="value" id="j5">--</div></div>
</div>
</div>
<div class="card">
<h2>Controls</h2>
<div class="btn-group" style="margin-bottom: 16px;">
<button onclick="goHome()" id="btn-home">Home</button>
<button onclick="resetState()" id="btn-reset">Reset</button>
</div>
<div id="vacuum-controls" style="display: none; margin-bottom: 16px;">
<h2>Vacuum Gripper</h2>
<div class="btn-group" style="margin-top: 8px;">
<button onclick="vacuumOn()" id="btn-vacuum-on">Vacuum ON</button>
<button onclick="vacuumOff()" id="btn-vacuum-off">Vacuum OFF</button>
<span id="vacuum-status" style="margin-left: 12px; padding: 8px;">Status: --</span>
</div>
</div>
<h2>Move To Position</h2>
<form id="moveForm" onsubmit="moveTo(event)">
<div class="form-grid">
<div class="form-group">
<label for="move-x">X (mm)</label>
<input type="number" id="move-x" step="0.1" required>
</div>
<div class="form-group">
<label for="move-y">Y (mm)</label>
<input type="number" id="move-y" step="0.1" required>
</div>
<div class="form-group">
<label for="move-z">Z (mm)</label>
<input type="number" id="move-z" step="0.1" required>
</div>
<div class="form-group">
<label for="move-speed">Speed (mm/s)</label>
<input type="number" id="move-speed" step="1" value="30" min="1" max="200">
</div>
</div>
<button type="submit" id="btn-move">Move</button>
</form>
</div>
<div class="card">
<h2>Response Log</h2>
<div id="log"></div>
</div>
</div>
<script>
const $ = id => document.getElementById(id);
function log(msg, isError = false) {
const logEl = $('log');
const time = new Date().toLocaleTimeString();
const entry = document.createElement('div');
entry.className = 'log-entry ' + (isError ? 'error' : 'success');
entry.innerHTML = '<span class="time">' + time + '</span>' + msg;
logEl.insertBefore(entry, logEl.firstChild);
if (logEl.children.length > 50) logEl.removeChild(logEl.lastChild);
}
async function fetchJson(url, opts = {}) {
try {
const res = await fetch(url, opts);
return await res.json();
} catch (e) {
return { error: e.message };
}
}
async function updateStatus() {
const data = await fetchJson('/api/status');
if (data.error) {
$('st-connected').textContent = 'Error';
$('st-connected').className = 'value error';
return;
}
$('st-connected').textContent = data.connected ? 'Yes' : 'No';
$('st-connected').className = 'value ' + (data.connected ? 'ok' : 'error');
$('st-state').textContent = data.state;
$('st-mode').textContent = data.mode;
$('st-error').textContent = data.error_code;
$('st-error').className = 'value ' + (data.error_code === 0 ? 'ok' : 'error');
$('st-warn').textContent = data.warn_code;
$('st-warn').className = 'value ' + (data.warn_code === 0 ? 'ok' : 'error');
}
async function updatePose() {
const pose = await fetchJson('/api/pose');
if (!pose.error) {
$('pose-x').textContent = pose.x.toFixed(1);
$('pose-y').textContent = pose.y.toFixed(1);
$('pose-z').textContent = pose.z.toFixed(1);
$('pose-roll').textContent = pose.roll.toFixed(1);
$('pose-pitch').textContent = pose.pitch.toFixed(1);
$('pose-yaw').textContent = pose.yaw.toFixed(1);
}
const joints = await fetchJson('/api/joints');
if (!joints.error && joints.joints) {
for (let i = 0; i < 6; i++) {
$('j' + i).textContent = joints.joints[i].toFixed(1);
}
}
}
async function goHome() {
$('btn-home').disabled = true;
log('Sending Home command...');
const res = await fetchJson('/api/home', { method: 'POST' });
$('btn-home').disabled = false;
if (res.ok) {
log('Home completed (code: ' + res.code + ')');
} else {
log('Home failed: ' + (res.detail || res.code), true);
}
}
async function resetState() {
$('btn-reset').disabled = true;
log('Resetting robot state...');
const res = await fetchJson('/api/reset', { method: 'POST' });
$('btn-reset').disabled = false;
if (res.ok) {
log('Reset completed (code: ' + res.code + ')');
} else {
log('Reset failed: ' + (res.detail || res.code), true);
}
}
// Handle the move form submit: validate the coordinate fields, then
// POST /api/move_to with { x, y, z, speed } and report the outcome.
async function moveTo(e) {
  e.preventDefault();
  const x = parseFloat($('move-x').value);
  const y = parseFloat($('move-y').value);
  const z = parseFloat($('move-z').value);
  const speed = parseFloat($('move-speed').value) || 30;
  // parseFloat yields NaN for empty/invalid fields; JSON would serialize
  // that as null and the server would reject (or misbehave). Fail fast.
  if ([x, y, z].some(Number.isNaN)) {
    log('Move rejected: x, y and z must be valid numbers', true);
    return;
  }
  $('btn-move').disabled = true;
  log('Moving to (' + x + ', ' + y + ', ' + z + ') at ' + speed + ' mm/s...');
  const res = await fetchJson('/api/move_to', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ x, y, z, speed })
  });
  $('btn-move').disabled = false;
  if (res.ok) {
    log('Move completed (code: ' + res.code + ')');
  } else {
    log('Move failed: ' + (res.detail || res.code), true);
  }
}
// Vacuum gripper functions
let vacuumEnabled = false;
// Fetch server config once at startup; when the vacuum feature is enabled,
// reveal its controls and start polling its status every second.
async function initConfig() {
  const config = await fetchJson('/api/config');
  if (!config.vacuum_enabled) return;
  vacuumEnabled = true;
  $('vacuum-controls').style.display = 'block';
  updateVacuumStatus();
  setInterval(updateVacuumStatus, 1000);
}
// Poll GET /api/vacuum and paint the ON/OFF indicator.
// No-op unless the vacuum feature was enabled by initConfig().
async function updateVacuumStatus() {
  if (!vacuumEnabled) return;
  const data = await fetchJson('/api/vacuum');
  if (data.error) return;
  const indicator = $('vacuum-status');
  indicator.textContent = 'Status: ' + (data.on ? 'ON' : 'OFF');
  indicator.style.color = data.on ? '#4ade80' : '#888';
}
// POST /api/vacuum/on; on success refresh the status indicator immediately.
async function vacuumOn() {
  const btn = $('btn-vacuum-on');
  btn.disabled = true;
  log('Turning vacuum ON...');
  const res = await fetchJson('/api/vacuum/on', { method: 'POST' });
  btn.disabled = false;
  if (!res.ok) {
    log('Vacuum ON failed: ' + (res.detail || res.code), true);
    return;
  }
  log('Vacuum ON (code: ' + res.code + ')');
  updateVacuumStatus();
}
// POST /api/vacuum/off; on success refresh the status indicator immediately.
async function vacuumOff() {
  const btn = $('btn-vacuum-off');
  btn.disabled = true;
  log('Turning vacuum OFF...');
  const res = await fetchJson('/api/vacuum/off', { method: 'POST' });
  btn.disabled = false;
  if (!res.ok) {
    log('Vacuum OFF failed: ' + (res.detail || res.code), true);
    return;
  }
  log('Vacuum OFF (code: ' + res.code + ')');
  updateVacuumStatus();
}
// Auto-refresh: status polls at 1 Hz, pose/joints at 2 Hz. The immediate
// calls below populate the UI once before the first interval fires.
setInterval(updateStatus, 1000);
setInterval(updatePose, 500);
updateStatus();
updatePose();
initConfig();
</script>
</body>
</html>"""
@app.get("/api/status")
def get_status():
    """Get robot status (connected, enabled, errors).

    Returns whatever ``helper.get_status()`` reports; any helper failure
    is mapped to an HTTP 500 with the exception text as the detail.
    """
    try:
        return helper.get_status()
    except Exception as e:
        # Chain the original exception so tracebacks stay useful (ruff B904).
        raise HTTPException(status_code=500, detail=str(e)) from e
@app.get("/api/pose")
def get_pose():
    """Get current TCP pose [x, y, z, roll, pitch, yaw].

    Helper failures become HTTP 500 responses with the exception text.
    """
    try:
        return helper.get_current_position()
    except Exception as e:
        # Chain the original exception so tracebacks stay useful (ruff B904).
        raise HTTPException(status_code=500, detail=str(e)) from e
@app.get("/api/joints")
def get_joints():
    """Get current joint angles in degrees.

    Returns ``{"joints": [...], "units": "deg"}``; helper failures become
    HTTP 500 responses with the exception text.
    """
    try:
        angles = helper.get_joint_angles_deg()
        # .tolist() converts the array to plain floats for JSON encoding.
        return {"joints": angles.tolist(), "units": "deg"}
    except Exception as e:
        # Chain the original exception so tracebacks stay useful (ruff B904).
        raise HTTPException(status_code=500, detail=str(e)) from e
@app.post("/api/home")
def go_home():
    """Send robot to home position.

    Returns ``{"ok": bool, "code": int}`` where ``ok`` mirrors the SDK
    convention that code 0 means success.
    """
    try:
        code = helper.go_home()
        return {"ok": code == 0, "code": code}
    except Exception as e:
        # Chain the original exception so tracebacks stay useful (ruff B904).
        raise HTTPException(status_code=500, detail=str(e)) from e
@app.post("/api/move_to")
def move_to(request: MoveToRequest):
    """Move to position with optional orientation.

    Falls back to the server-wide ``default_speed`` when the request does
    not specify one. Returns ``{"ok": bool, "code": int}``.
    """
    try:
        speed = request.speed if request.speed is not None else default_speed
        code = helper.move_to_pose(
            request.x,
            request.y,
            request.z,
            request.roll,
            request.pitch,
            request.yaw,
            speed,
            default_units,
        )
        return {"ok": code == 0, "code": code}
    except Exception as e:
        # Chain the original exception so tracebacks stay useful (ruff B904).
        raise HTTPException(status_code=500, detail=str(e)) from e
@app.post("/api/move_to_pose")
def move_to_pose(request: MoveToPoseRequest):
    """Move to full pose with position and orientation.

    Falls back to the server-wide ``default_speed`` when the request does
    not specify one. Returns ``{"ok": bool, "code": int}``.
    """
    try:
        speed = request.speed if request.speed is not None else default_speed
        code = helper.move_to_pose(
            request.x,
            request.y,
            request.z,
            request.roll,
            request.pitch,
            request.yaw,
            speed,
            default_units,
        )
        return {"ok": code == 0, "code": code}
    except Exception as e:
        # Chain the original exception so tracebacks stay useful (ruff B904).
        raise HTTPException(status_code=500, detail=str(e)) from e
@app.post("/api/reset")
def reset_state():
    """Clear errors/warnings and reset robot state.

    Returns ``{"ok": bool, "code": int}`` (code 0 means success).
    """
    try:
        code = helper.reset_state()
        return {"ok": code == 0, "code": code}
    except Exception as e:
        # Chain the original exception so tracebacks stay useful (ruff B904).
        raise HTTPException(status_code=500, detail=str(e)) from e
@app.post("/api/disconnect")
def disconnect():
    """Disconnect from robot.

    Returns ``{"ok": bool, "code": int}`` (code 0 means success).
    """
    try:
        code = helper.disconnect()
        return {"ok": code == 0, "code": code}
    except Exception as e:
        # Chain the original exception so tracebacks stay useful (ruff B904).
        raise HTTPException(status_code=500, detail=str(e)) from e
# Vacuum gripper endpoints (only if enabled): registered conditionally so a
# robot without the gripper exposes no dead routes.
if vacuum_enabled:

    @app.get("/api/vacuum")
    def get_vacuum():
        """Get vacuum gripper status."""
        try:
            return helper.get_vacuum_gripper()
        except Exception as e:
            # Chain so tracebacks stay useful (ruff B904).
            raise HTTPException(status_code=500, detail=str(e)) from e

    @app.post("/api/vacuum/on")
    def vacuum_on():
        """Turn vacuum gripper on; returns {"ok": bool, "code": int}."""
        try:
            code = helper.set_vacuum_gripper(True)
            return {"ok": code == 0, "code": code}
        except Exception as e:
            # Chain so tracebacks stay useful (ruff B904).
            raise HTTPException(status_code=500, detail=str(e)) from e

    @app.post("/api/vacuum/off")
    def vacuum_off():
        """Turn vacuum gripper off; returns {"ok": bool, "code": int}."""
        try:
            code = helper.set_vacuum_gripper(False)
            return {"ok": code == 0, "code": code}
        except Exception as e:
            # Chain so tracebacks stay useful (ruff B904).
            raise HTTPException(status_code=500, detail=str(e)) from e

@app.get("/api/config")
def get_config():
    """Get API configuration.

    Exposes feature flags (currently only ``vacuum_enabled``) so the web UI
    can decide which controls to render.
    """
    return {"vacuum_enabled": vacuum_enabled}
return app
def run_uvicorn(app: FastAPI, host: str, port: int) -> None:
    """Blocking uvicorn server entry point (intended for a background thread).

    Uses the programmatic Config/Server API rather than uvicorn.run() so the
    caller controls the thread it executes on.
    """
    server = uvicorn.Server(
        uvicorn.Config(app, host=host, port=port, log_level="warning")
    )
    server.run()
def _send_joint_pose(node: Node, joints_deg: np.ndarray) -> None:
    """Publish joint angles (degrees) on the ``joint_pose`` output."""
    node.send_output(
        "joint_pose",
        pa.array(joints_deg.tolist()),
        metadata={"units": "deg", "timestamp_ns": time.time_ns()},
    )
def _send_tcp_pose(node: Node, tcp_pose: Dict[str, float]) -> None:
    """Publish the TCP pose as a flat [x, y, z, roll, pitch, yaw] array.

    Position components are in millimeters, rotations in degrees, as
    recorded in the output metadata.
    """
    flat = [tcp_pose[key] for key in ("x", "y", "z", "roll", "pitch", "yaw")]
    node.send_output(
        "tcp_pose",
        pa.array(flat),
        metadata={
            "units_position": "mm",
            "units_rotation": "deg",
            "timestamp_ns": time.time_ns(),
        },
    )
def _send_error_status(node: Node, action: str, message: str) -> None:
    """Publish a JSON-encoded failure report on the ``status`` output."""
    report = json.dumps(
        {
            "ok": False,
            "action": action,
            "message": message,
            "timestamp_ns": time.time_ns(),
        }
    )
    node.send_output("status", pa.array([report]))
def main() -> None:
    """Node entry point: serve the HTTP control API in a background thread
    and publish robot state to dora on every ``tick`` input."""
    node = Node()
    # All connection/behavior knobs are overridable via environment variables.
    robot_ip = os.getenv("ROBOT_IP", "192.168.1.192")
    default_speed = float(os.getenv("DEFAULT_SPEED", "30"))
    default_units = os.getenv("DEFAULT_UNITS", "mm")
    api_host = os.getenv("API_HOST", "0.0.0.0")
    api_port = int(os.getenv("API_PORT", "8080"))
    vacuum_enabled = os.getenv("VACUUM_ENABLED", "false").lower() in ("true", "1", "yes")
    helper = ULite6Helper(robot_ip)
    # Create and start FastAPI server in background thread.
    # daemon=True lets the process exit when the dora event loop ends.
    app = create_api(helper, default_speed, default_units, vacuum_enabled)
    api_thread = threading.Thread(
        target=run_uvicorn, args=(app, api_host, api_port), daemon=True
    )
    api_thread.start()
    # Dora event loop - only handles tick for state publishing
    for event in node:
        if event["type"] != "INPUT":
            continue
        if event["id"] == "tick":
            try:
                # Read both state values first so nothing is published if
                # either read fails mid-tick.
                joints = helper.get_joint_angles_deg()
                tcp_pose = helper.get_current_position()
                _send_joint_pose(node, joints)
                _send_tcp_pose(node, tcp_pose)
            except Exception as exc:
                # Robot I/O can fail transiently; report instead of crashing.
                _send_error_status(node, "publish_state", str(exc))
# Allow running the module directly, outside the installed console script.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,28 @@
[project]
name = "dora-ulite6"
version = "0.1.0"
license = "MIT"
authors = [
    { name = "Dora" }
]
description = "Dora node for controlling the UFactory Lite6 robot"
# scipy >= 1.14 only supports Python 3.10+, so ">=3.8" was unsatisfiable
# together with the dependency pins below.
requires-python = ">=3.10"
dependencies = [
    "dora-rs >= 0.3.9",
    "numpy < 2.0.0",
    "scipy >= 1.14.0",
    "xarm-python-sdk >= 1.17.3",
    "fastapi >= 0.109.0",
    "uvicorn >= 0.27.0",
]

[dependency-groups]
dev = ["pytest >=8.1.1", "ruff >=0.9.1"]

[project.scripts]
dora-ulite6 = "dora_ulite6.main:main"

[tool.setuptools]
packages = ["dora_ulite6"]

View File

@@ -0,0 +1,34 @@
cmake_minimum_required(VERSION 3.21)
project(dora_zed_cpp LANGUAGES C CXX)

# Require C++17 exactly; without STANDARD_REQUIRED the compiler may silently
# fall back to an older standard.
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
# Build position-independent code for every target. Preferred over
# overwriting CMAKE_CXX_FLAGS with "-fPIC", which discarded any flags the
# user passed on the command line.
set(CMAKE_POSITION_INDEPENDENT_CODE ON)

include(DoraTargets.cmake)

# Location of the ZED SDK install (override with -DZED_DIR=...).
set(ZED_DIR "/usr/local/zed" CACHE PATH "Path to the ZED SDK")
set(ZED_PATH ${ZED_DIR})

find_package(CUDAToolkit REQUIRED)
find_package(zed REQUIRED)
find_package(OpenCV REQUIRED)

link_directories(${dora_link_dirs})
link_directories(${ZED_LIBRARY_DIR})

# Node executable: application code plus the generated cxxbridge shim.
add_executable(dora_zed_cpp main.cc ${node_bridge})
add_dependencies(dora_zed_cpp Dora_cxx)
target_include_directories(
  dora_zed_cpp
  PRIVATE
  ${dora_cxx_include_dir}
  ${dora_c_include_dir}
  ${ZED_INCLUDE_DIRS}
  ${OpenCV_INCLUDE_DIRS}
  ${CUDAToolkit_INCLUDE_DIRS}
)
target_link_libraries(dora_zed_cpp dora_node_api_cxx ${ZED_LIBRARIES} ${OpenCV_LIBS})
# Install next to the sources so the dataflow YAML can reference bin/.
install(TARGETS dora_zed_cpp DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/bin)

View File

@@ -0,0 +1,79 @@
# Builds the dora C/C++ node API bridges and exposes:
#   dora_c_include_dir / dora_cxx_include_dir - generated headers
#   node_bridge                               - generated cxxbridge .cc file
#   dora_link_dirs                            - directory with the built libs
# Two modes: a local dora checkout (DORA_ROOT_DIR set) or a fresh git clone.
set(DORA_ROOT_DIR "/home/cristhian/workspace/garbage/dora" CACHE FILEPATH "Path to the root of dora")
set(dora_c_include_dir "${CMAKE_CURRENT_BINARY_DIR}/include/c")
set(dora_cxx_include_dir "${CMAKE_CURRENT_BINARY_DIR}/include/cxx")
set(node_bridge "${CMAKE_CURRENT_BINARY_DIR}/node_bridge.cc")
if(DORA_ROOT_DIR)
    # --- Local checkout: cargo-build the C and C++ node APIs in place. ---
    include(ExternalProject)
    ExternalProject_Add(Dora
        SOURCE_DIR ${DORA_ROOT_DIR}
        BUILD_IN_SOURCE True
        CONFIGURE_COMMAND ""
        BUILD_COMMAND
        cargo build
        --package dora-node-api-c
        &&
        cargo build
        --package dora-node-api-cxx
        INSTALL_COMMAND ""
    )
    # Copy the cxxbridge-generated sources/headers into the build tree.
    # Runs after the cargo build (DEPENDS Dora) so the artifacts exist.
    add_custom_command(OUTPUT ${node_bridge} ${dora_cxx_include_dir} ${dora_c_include_dir}
        WORKING_DIRECTORY ${DORA_ROOT_DIR}
        DEPENDS Dora
        COMMAND
        mkdir ${dora_cxx_include_dir} -p
        &&
        mkdir ${CMAKE_CURRENT_BINARY_DIR}/include/c -p
        &&
        cp target/cxxbridge/dora-node-api-cxx/src/lib.rs.cc ${node_bridge}
        &&
        cp target/cxxbridge/dora-node-api-cxx/src/lib.rs.h ${dora_cxx_include_dir}/dora-node-api.h
        &&
        cp apis/c/node ${CMAKE_CURRENT_BINARY_DIR}/include/c -r
    )
    # Named targets other CMakeLists can add_dependencies() on.
    add_custom_target(Dora_c DEPENDS ${dora_c_include_dir})
    add_custom_target(Dora_cxx DEPENDS ${node_bridge} ${dora_cxx_include_dir})
    # Link against the local checkout's debug build.
    set(dora_link_dirs ${DORA_ROOT_DIR}/target/debug)
else()
    # --- No local checkout: clone dora from GitHub and build in the
    # binary dir, pointing cargo's target dir inside the build tree. ---
    include(ExternalProject)
    ExternalProject_Add(Dora
        PREFIX ${CMAKE_CURRENT_BINARY_DIR}/dora
        GIT_REPOSITORY https://github.com/dora-rs/dora.git
        GIT_TAG main
        BUILD_IN_SOURCE True
        CONFIGURE_COMMAND ""
        BUILD_COMMAND
        cargo build
        --package dora-node-api-c
        --target-dir ${CMAKE_CURRENT_BINARY_DIR}/dora/src/Dora/target
        &&
        cargo build
        --package dora-node-api-cxx
        --target-dir ${CMAKE_CURRENT_BINARY_DIR}/dora/src/Dora/target
        INSTALL_COMMAND ""
    )
    # Same artifact copy as above, but relative to the cloned target dir.
    add_custom_command(OUTPUT ${node_bridge} ${dora_cxx_include_dir} ${dora_c_include_dir}
        WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/dora/src/Dora/target
        DEPENDS Dora
        COMMAND
        mkdir ${CMAKE_CURRENT_BINARY_DIR}/include/c -p
        &&
        mkdir ${dora_cxx_include_dir} -p
        &&
        cp cxxbridge/dora-node-api-cxx/src/lib.rs.cc ${node_bridge}
        &&
        cp cxxbridge/dora-node-api-cxx/src/lib.rs.h ${dora_cxx_include_dir}/dora-node-api.h
        &&
        cp ../apis/c/node ${CMAKE_CURRENT_BINARY_DIR}/include/c -r
    )
    set(dora_link_dirs ${CMAKE_CURRENT_BINARY_DIR}/dora/src/Dora/target/debug)
    add_custom_target(Dora_c DEPENDS ${dora_c_include_dir})
    add_custom_target(Dora_cxx DEPENDS ${node_bridge} ${dora_cxx_include_dir})
endif()

31
dora_zed_cpp/README.md Normal file
View File

@@ -0,0 +1,31 @@
# dora_zed_cpp
C++ Dora node for ZED camera streaming.
## Build & run
From the repo root:
```bash
cmake -S dora_zed_cpp -B dora_zed_cpp/build
cmake --build dora_zed_cpp/build
dora run dataflow_zed_cpp.yml
```
## Configuration (env)
- `ZED_RESOLUTION` (720, 1080, 2000)
- `ZED_FPS` (default 15)
- `ZED_DEPTH_MODE` (NEURAL, PERFORMANCE, QUALITY, NONE)
- `ZED_DEPTH_MIN_MM` (default 10)
- `ZED_DEPTH_MAX_MM` (default 500)
- `ZED_DEPTH_FILL` (true/false, default false)
- `ZED_FLIP` (ON/OFF, default ON)
- `ZED_WARMUP_FRAMES` (default 30)
- `ZED_RETRY_INTERVAL_SECONDS` (default 5; seconds between reconnect attempts when the camera fails to open or grab)
## Outputs
- `image_bgr` raw bytes, metadata: `shape`, `dtype`, `encoding`, `layout`, and intrinsics (`fx`, `fy`, `cx`, `cy`, `distortion`)
- `point_cloud` raw bytes, metadata: `shape`, `dtype`, `channels`, `units`, `layout`, and intrinsics
- `camera_info` raw bytes (4 floats: fx, fy, cx, cy), metadata includes `distortion`

290
dora_zed_cpp/main.cc Normal file
View File

@@ -0,0 +1,290 @@
#include <dora-node-api.h>
#include <sl/Camera.hpp>
#include <opencv2/opencv.hpp>
#include <array>
#include <cctype>
#include <chrono>
#include <cstdlib>
#include <initializer_list>
#include <iostream>
#include <iterator>
#include <string>
#include <thread>
#include <tuple>
#include <vector>
namespace {

// ---- Environment helpers -------------------------------------------------

// Integer environment variable; returns `default_value` when the variable is
// unset, empty, or not a valid integer. (The previous unguarded std::stoi
// threw std::invalid_argument/out_of_range on malformed input and aborted
// the node at startup.)
int get_env_int(const char* name, int default_value) {
  const char* value = std::getenv(name);
  if (!value || std::string(value).empty()) {
    return default_value;
  }
  try {
    return std::stoi(value);
  } catch (const std::exception&) {
    std::cerr << "Invalid integer for env var " << name
              << "; using default " << default_value << std::endl;
    return default_value;
  }
}

// Boolean environment variable; accepts 1/true/yes/on (case-insensitive).
bool get_env_bool(const char* name, bool default_value) {
  const char* value = std::getenv(name);
  if (!value || std::string(value).empty()) {
    return default_value;
  }
  std::string v(value);
  for (auto& c : v) {
    c = static_cast<char>(std::tolower(c));
  }
  return v == "1" || v == "true" || v == "yes" || v == "on";
}

// String environment variable with a default for unset/empty.
std::string get_env_str(const char* name, const std::string& default_value) {
  const char* value = std::getenv(name);
  if (!value || std::string(value).empty()) {
    return default_value;
  }
  return std::string(value);
}

// ---- ZED parameter parsing ------------------------------------------------

// Maps 1080 -> HD1080, 2000 -> HD2K; everything else (incl. 720) -> HD720.
sl::RESOLUTION parse_resolution(int resolution) {
  switch (resolution) {
    case 1080:
      return sl::RESOLUTION::HD1080;
    case 2000:
      return sl::RESOLUTION::HD2K;
    case 720:
    default:
      return sl::RESOLUTION::HD720;
  }
}

// Unrecognized mode strings fall back to NEURAL.
sl::DEPTH_MODE parse_depth_mode(const std::string& mode) {
  if (mode == "PERFORMANCE") {
    return sl::DEPTH_MODE::PERFORMANCE;
  }
  if (mode == "QUALITY") {
    return sl::DEPTH_MODE::QUALITY;
  }
  if (mode == "NONE") {
    return sl::DEPTH_MODE::NONE;
  }
  return sl::DEPTH_MODE::NEURAL;
}

// Only the exact string "OFF" disables image flipping.
sl::FLIP_MODE parse_flip(const std::string& mode) {
  if (mode == "OFF") {
    return sl::FLIP_MODE::OFF;
  }
  return sl::FLIP_MODE::ON;
}

// ---- Metadata helpers (rust::Vec for dora metadata lists) -----------------

// HWC shape triple for image/point-cloud metadata.
rust::Vec<int64_t> make_shape(int height, int width, int channels) {
  rust::Vec<int64_t> shape;
  shape.push_back(height);
  shape.push_back(width);
  shape.push_back(channels);
  return shape;
}

// Copies a std::vector of distortion coefficients into a rust::Vec.
rust::Vec<double> make_distortion(const std::vector<double>& distortion) {
  rust::Vec<double> values;
  for (double v : distortion) {
    values.push_back(v);
  }
  return values;
}

// Builds a rust::Vec<int64_t> from a brace list, e.g. make_vector_int({4}).
rust::Vec<int64_t> make_vector_int(std::initializer_list<int64_t> values) {
  rust::Vec<int64_t> out;
  for (int64_t v : values) {
    out.push_back(v);
  }
  return out;
}

// ---- Camera management ----------------------------------------------------

// Logs every ZED device the SDK can currently see (model + serial).
void list_available_cameras() {
  auto devices = sl::Camera::getDeviceList();
  std::cout << "Detected ZED cameras: " << devices.size() << std::endl;
  for (std::size_t i = 0; i < devices.size(); i++) {
    const auto& dev = devices[i];
    std::cout << " - [" << i << "] model=" << sl::toString(dev.camera_model)
              << ", serial=" << dev.serial_number << std::endl;
  }
}

// Single open attempt; logs the SDK error code on failure.
bool open_camera(sl::Camera& zed, const sl::InitParameters& init_params) {
  sl::ERROR_CODE err = zed.open(init_params);
  if (err != sl::ERROR_CODE::SUCCESS) {
    std::cerr << "Failed to open ZED camera: " << sl::toString(err) << std::endl;
    return false;
  }
  return true;
}

// Retries opening the camera forever, sleeping `sleep_ms` between attempts.
// Blocking by design: the node is useless without a camera, so it waits for
// one to appear (e.g. after a USB re-plug in a headless deployment).
bool ensure_camera_ready(
    sl::Camera& zed,
    const sl::InitParameters& init_params,
    int sleep_ms
) {
  while (true) {
    if (open_camera(zed, init_params)) {
      return true;
    }
    std::this_thread::sleep_for(std::chrono::milliseconds(sleep_ms));
  }
}

}  // namespace
// Entry point: open the ZED camera, then on every dora input event grab a
// frame and publish three outputs: image_bgr, point_cloud, camera_info.
// Exits cleanly on Stop/AllInputsClosed.
int main() {
  auto dora_node = init_dora_node();
  // Configuration via environment (see README for the documented knobs).
  const int resolution = get_env_int("ZED_RESOLUTION", 720);
  const int fps = get_env_int("ZED_FPS", 15);
  const std::string depth_mode = get_env_str("ZED_DEPTH_MODE", "NEURAL");
  const int depth_min_mm = get_env_int("ZED_DEPTH_MIN_MM", 10);
  const int depth_max_mm = get_env_int("ZED_DEPTH_MAX_MM", 500);
  const bool depth_fill = get_env_bool("ZED_DEPTH_FILL", false);
  const std::string flip = get_env_str("ZED_FLIP", "ON");
  const int warmup_frames = get_env_int("ZED_WARMUP_FRAMES", 30);
  const int retry_interval_seconds = get_env_int("ZED_RETRY_INTERVAL_SECONDS", 5);
  const int retry_interval_ms = retry_interval_seconds * 1000;
  // All distances below (depth range, point cloud) are in millimeters.
  sl::InitParameters init_params;
  init_params.camera_resolution = parse_resolution(resolution);
  init_params.camera_fps = fps;
  init_params.depth_mode = parse_depth_mode(depth_mode);
  init_params.coordinate_units = sl::UNIT::MILLIMETER;
  init_params.camera_image_flip = parse_flip(flip);
  init_params.depth_minimum_distance = depth_min_mm;
  init_params.depth_maximum_distance = depth_max_mm;
  list_available_cameras();
  sl::Camera zed;
  // Blocks until a camera opens (retries every retry_interval_ms).
  ensure_camera_ready(zed, init_params, retry_interval_ms);
  sl::RuntimeParameters runtime_params;
  runtime_params.enable_depth = true;
  runtime_params.enable_fill_mode = depth_fill;
  // Reused across iterations to avoid per-frame reallocation.
  sl::Mat image_mat;
  sl::Mat point_cloud_mat;
  // Discard the first frames so auto-exposure/depth settle before publishing.
  for (int i = 0; i < warmup_frames; i++) {
    zed.grab(runtime_params);
  }
  // Reads the left camera's intrinsics + distortion from the SDK; called
  // again after every reconnect since calibration may differ per device.
  auto reload_calibration = [&]() {
    sl::CameraInformation info = zed.getCameraInformation();
    auto calib = info.camera_configuration.calibration_parameters;
    auto left = calib.left_cam;
    std::vector<double> dist;
    dist.reserve(std::size(left.disto));
    for (double v : left.disto) {
      dist.push_back(v);
    }
    return std::tuple<float, float, float, float, std::vector<double>>(
        left.fx, left.fy, left.cx, left.cy, std::move(dist));
  };
  auto [fx, fy, cx, cy, distortion] = reload_calibration();
  while (true) {
    auto event = dora_node.events->next();
    auto type = event_type(event);
    if (type == DoraEventType::Stop || type == DoraEventType::AllInputsClosed) {
      break;
    }
    if (type != DoraEventType::Input) {
      continue;
    }
    // The input payload is unused: any input event acts as a frame trigger.
    auto input = event_as_input(std::move(event));
    (void)input;
    if (zed.grab(runtime_params) != sl::ERROR_CODE::SUCCESS) {
      // Grab failure usually means the camera dropped; close, reopen
      // (blocking), and refresh the cached calibration before continuing.
      std::cerr << "Grab failed; attempting to reconnect..." << std::endl;
      zed.close();
      list_available_cameras();
      ensure_camera_ready(zed, init_params, retry_interval_ms);
      std::tie(fx, fy, cx, cy, distortion) = reload_calibration();
      continue;
    }
    zed.retrieveImage(image_mat, sl::VIEW::LEFT);
    zed.retrieveMeasure(point_cloud_mat, sl::MEASURE::XYZRGBA);
    int width = image_mat.getWidth();
    int height = image_mat.getHeight();
    // Wrap the SDK buffer without copying, then convert 4-channel to BGR.
    // NOTE(review): this assumes a packed row stride of width*4 bytes;
    // sl::Mat rows can be padded — confirm against image_mat.getStepBytes().
    cv::Mat image_rgba(height, width, CV_8UC4, image_mat.getPtr<sl::uchar1>());
    cv::Mat image_bgr;
    cv::cvtColor(image_rgba, image_bgr, cv::COLOR_BGRA2BGR);
    const auto image_size = static_cast<size_t>(image_bgr.total() * image_bgr.elemSize());
    rust::Slice<const uint8_t> image_slice{
        reinterpret_cast<const uint8_t*>(image_bgr.data), image_size};
    // image_bgr output: raw HWC uint8 bytes + intrinsics in metadata.
    auto image_metadata = new_metadata();
    image_metadata->set_string("dtype", "uint8");
    image_metadata->set_string("encoding", "bgr8");
    image_metadata->set_string("layout", "HWC");
    image_metadata->set_list_int("shape", make_shape(height, width, 3));
    image_metadata->set_float("fx", fx);
    image_metadata->set_float("fy", fy);
    image_metadata->set_float("cx", cx);
    image_metadata->set_float("cy", cy);
    image_metadata->set_list_float("distortion", make_distortion(distortion));
    auto image_result = send_output_with_metadata(
        dora_node.send_output, "image_bgr", image_slice, std::move(image_metadata));
    if (!std::string(image_result.error).empty()) {
      std::cerr << "Error sending image_bgr: " << std::string(image_result.error) << std::endl;
    }
    // point_cloud output: float32 XYZRGBA per pixel, millimeters.
    int pc_width = point_cloud_mat.getWidth();
    int pc_height = point_cloud_mat.getHeight();
    const size_t pc_size = static_cast<size_t>(pc_width * pc_height * sizeof(sl::float4));
    const auto* pc_ptr = reinterpret_cast<const uint8_t*>(point_cloud_mat.getPtr<sl::float4>());
    rust::Slice<const uint8_t> pc_slice{pc_ptr, pc_size};
    auto pc_metadata = new_metadata();
    pc_metadata->set_string("dtype", "float32");
    pc_metadata->set_string("layout", "HWC");
    pc_metadata->set_string("channels", "XYZRGBA");
    pc_metadata->set_string("units", "mm");
    pc_metadata->set_list_int("shape", make_shape(pc_height, pc_width, 4));
    pc_metadata->set_float("fx", fx);
    pc_metadata->set_float("fy", fy);
    pc_metadata->set_float("cx", cx);
    pc_metadata->set_float("cy", cy);
    pc_metadata->set_list_float("distortion", make_distortion(distortion));
    auto pc_result = send_output_with_metadata(
        dora_node.send_output, "point_cloud", pc_slice, std::move(pc_metadata));
    if (!std::string(pc_result.error).empty()) {
      std::cerr << "Error sending point_cloud: " << std::string(pc_result.error) << std::endl;
    }
    // camera_info output: 4 float32 values (fx, fy, cx, cy) as raw bytes.
    float camera_data[4] = {fx, fy, cx, cy};
    rust::Slice<const uint8_t> cam_slice{
        reinterpret_cast<const uint8_t*>(camera_data), sizeof(camera_data)};
    auto cam_metadata = new_metadata();
    cam_metadata->set_string("dtype", "float32");
    cam_metadata->set_string("layout", "C");
    cam_metadata->set_list_int("shape", make_vector_int({4}));
    cam_metadata->set_list_float("distortion", make_distortion(distortion));
    auto cam_result = send_output_with_metadata(
        dora_node.send_output, "camera_info", cam_slice, std::move(cam_metadata));
    if (!std::string(cam_result.error).empty()) {
      std::cerr << "Error sending camera_info: " << std::string(cam_result.error) << std::endl;
    }
  }
  zed.close();
  return 0;
}