# Files
# dora_littlehand/dataflow_voice_control_ulite6_zed.yml
# 2026-02-02 12:29:59 -03:00
#
# 132 lines
# 3.9 KiB
# YAML
# Voice-controlled pick-and-place dataflow:
#   zed_camera_cpp (ZED stereo camera) -> detector (YOLO) -> voice -> ulite6 (arm),
#   with iobridge relaying voice text/scene updates over WebSocket.
# NOTE(review): indentation restored from a whitespace-stripped copy; node nesting
# follows the standard dora dataflow schema — verify against the original file.
nodes:
  # C++ ZED camera driver: publishes BGR frames, camera intrinsics, and a point cloud.
  - id: zed_camera_cpp
    build: bash -lc "cmake -S dora_zed_cpp -B dora_zed_cpp/build && cmake --build dora_zed_cpp/build"
    path: dora_zed_cpp/build/dora_zed_cpp
    env:
      ZED_RESOLUTION: "720"
      ZED_FPS: "15"
      ZED_DEPTH_MODE: "NEURAL"
      ZED_DEPTH_MIN_MM: "10"
      ZED_DEPTH_MAX_MM: "600"
      ZED_DEPTH_FILL: "false"
      ZED_FLIP: "ON"
      ZED_WARMUP_FRAMES: "30"
    inputs:
      tick: dora/timer/millis/100
    outputs:
      - image_bgr
      - camera_info
      - point_cloud

  # uLite6 robot-arm driver: executes voice/robot_cmd, reports TCP pose and status.
  - id: ulite6
    build: uv pip install -e dora_ulite6
    path: dora_ulite6/dora_ulite6/main.py
    env:
      ROBOT_IP: "192.168.1.192"
      DEFAULT_SPEED: "30"
      DEFAULT_UNITS: "mm"
      API_HOST: "0.0.0.0"
      API_PORT: "9000"
      VACUUM_ENABLED: "true"
      # Initial position on startup: "home", "pose", or "none"
      # Set to "none" - voice control handles initial positioning
      INIT_MODE: "none"
    inputs:
      tick: dora/timer/millis/10
      command: voice/robot_cmd
    outputs:
      - tcp_pose
      - status

  # WebSocket bridge: forwards voice output/scene updates out, feeds text back in.
  - id: iobridge
    build: |
      uv venv -p 3.12 --seed --allow-existing
      uv pip install -e dora_iobridge
    path: dora_iobridge/dora_iobridge/main.py
    env:
      VIRTUAL_ENV: "./.venv"
      WS_HOST: "0.0.0.0"
      WS_PORT: "9001"
    inputs:
      text_in: voice/voice_out
      data_in: voice/scene_update
      tick: dora/timer/millis/100
    outputs:
      - text_out

  # YOLO object detector: fuses camera frames, point cloud, and arm pose into
  # 3D object detections plus an annotated debug image.
  - id: detector
    build: |
      uv venv -p 3.12 --seed --allow-existing
      uv pip install -e dora_yolo_object_detector
    path: dora_yolo_object_detector/dora_yolo_object_detector/main.py
    env:
      VIRTUAL_ENV: "./.venv"
      IMAGE_INPUT: "image_bgr"
      POINT_CLOUD_INPUT: "point_cloud"
      POSE_INPUT: "tcp_pose"
      OBJECTS_OUTPUT: "objects"
      IMAGE_OUTPUT: "image_annotated"
      CALIBRATION_FILE: "calibration_ulite6_zed.npz"
      DETECTOR_WEIGHTS: "trained_models/yolo8n.pt"
      CONFIG_FILE: "config.toml"
      ROI_TOP_LEFT: "500,230"
      ROI_BOTTOM_RIGHT: "775,510"
      SIZE_THRESHOLD: "4200"
      DETECT_EVERY_N: "3"
      MIN_DEPTH_MM: "10"
      MAX_DEPTH_MM: "600"
    inputs:
      image_bgr: zed_camera_cpp/image_bgr
      point_cloud: zed_camera_cpp/point_cloud
      tcp_pose: ulite6/tcp_pose
      tick: dora/timer/millis/100
    outputs:
      - objects
      - image_annotated

  # Voice-control brain: matches spoken (Spanish) commands against detected
  # objects and emits robot motion commands, speech output, and scene updates.
  - id: voice
    build: |
      uv venv -p 3.12 --seed --allow-existing
      uv pip install -e dora_voice_control
    path: dora_voice_control/dora_voice_control/main.py
    env:
      VIRTUAL_ENV: "./.venv"
      ROBOT_TYPE: "vacuum"  # "vacuum" or "gripper"
      OBJECTS_INPUT: "objects"
      POSE_INPUT: "tcp_pose"
      STATUS_INPUT: "status"
      COMMAND_OUTPUT: "robot_cmd"
      CONFIG_FILE: "config.toml"
      # Map Spanish command names to detector class names
      CLASS_MAP: '{"cilindro": "cylinder", "cubo": "cube", "estrella": "star", "caja": "box", "amarillo": "yellow", "rojo": "red", "azul": "blue", "blanco": "white", "grande": "big", "pequeno": "small"}'
      VOICE_IN_INPUT: "voice_in"
      VOICE_OUT_OUTPUT: "voice_out"
      SCENE_OUTPUT: "scene_update"
      TCP_OFFSET_MM: "63.0"
      APPROACH_OFFSET_MM: "50.0"
      STEP_MM: "20.0"
      DEFAULT_ROLL: "180.0"
      DEFAULT_PITCH: "0.0"
      DEFAULT_YAW: "0.0"
      DRY_RUN: "false"
      # Initial position (used on startup and reset command)
      INIT_ON_START: "true"
      INIT_X: "300.0"
      INIT_Y: "0.0"
      INIT_Z: "350.0"
      INIT_ROLL: "180.0"
      INIT_PITCH: "0.0"
      INIT_YAW: "0.0"
      IMAGE_INPUT: "image_annotated"
      IMAGE_WIDTH: "1280"
      IMAGE_HEIGHT: "720"
      API_ENABLED: "true"
      API_PORT: "9002"
    inputs:
      objects: detector/objects
      tcp_pose: ulite6/tcp_pose
      status: ulite6/status
      voice_in: iobridge/text_out
      image_annotated: detector/image_annotated
      tick: dora/timer/millis/100
    outputs:
      - robot_cmd
      - voice_out
      - scene_update