From c84a34761aeebe85787b32dfeec68eb85a484021 Mon Sep 17 00:00:00 2001 From: cagataycali Date: Tue, 31 Mar 2026 21:16:15 -0400 Subject: [PATCH 1/3] docs: rewrite README, update AGENTS.md, add 8 examples - README.md: Complete rewrite with 5-line promise, mermaid architecture diagram, installation extras table, simulation quickstart, policy execution examples - AGENTS.md: Updated project structure to include simulation modules, test instructions, and conventions - examples/01_sim_quickstart.py: 5-line sim quickstart - examples/02_sim_agent.py: Agent + simulation integration - examples/03_sim_recording.py: Dataset recording to LeRobot format - examples/04_real_hardware.py: Real hardware robot setup - examples/05_real_groot_policy.py: GR00T policy on real hardware - examples/06_list_robots.py: Robot discovery and listing - examples/act_policy_simulation.py: ACT policy in MuJoCo sim - examples/physics_agent.py: Natural language physics introspection --- AGENTS.md | 229 +++++++++-- README.md | 639 +++++++++++++----------------- examples/01_sim_quickstart.py | 29 ++ examples/02_sim_agent.py | 28 ++ examples/03_sim_recording.py | 43 ++ examples/04_real_hardware.py | 38 ++ examples/05_real_groot_policy.py | 62 +++ examples/06_list_robots.py | 28 ++ examples/act_policy_simulation.py | 43 ++ examples/physics_agent.py | 106 +++++ 10 files changed, 864 insertions(+), 381 deletions(-) create mode 100644 examples/01_sim_quickstart.py create mode 100644 examples/02_sim_agent.py create mode 100644 examples/03_sim_recording.py create mode 100644 examples/04_real_hardware.py create mode 100644 examples/05_real_groot_policy.py create mode 100644 examples/06_list_robots.py create mode 100755 examples/act_policy_simulation.py create mode 100644 examples/physics_agent.py diff --git a/AGENTS.md b/AGENTS.md index 6c1ad5a..3983f43 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -2,7 +2,7 @@ ## Overview -`strands-robots` is a robot control library for [Strands Agents](https://strandsagents.com). 
It provides policy inference, teleoperation, calibration, and simulation tools for physical robots. +`strands-robots` is a robot control and simulation library for [Strands Agents](https://strandsagents.com). It provides policy inference, simulation (MuJoCo), dataset recording (LeRobot format), teleoperation, and calibration tools for 38+ robots. ## Project Dashboard @@ -17,10 +17,16 @@ ``` strands_robots/ +├── __init__.py # Public API: Robot, list_robots, resolve_name +├── factory.py # Robot("so100") → sim or hardware dispatch +├── robot.py # HardwareRobot class (real robot control) +├── _async_utils.py # Coroutine resolution helpers +├── utils.py # require_optional(), shared utilities +│ ├── policies/ # Policy providers (pluggable via registry) │ ├── base.py # Abstract Policy base class │ ├── factory.py # create_policy() factory + registry -│ ├── mock.py # MockPolicy for testing +│ ├── mock.py # MockPolicy for testing (random actions) │ ├── groot/ # NVIDIA GR00T N1.5/N1.6/N1.7 inference │ │ ├── policy.py # Gr00tPolicy (ZMQ + HTTP modes) │ │ ├── client.py # Gr00tInferenceClient @@ -30,34 +36,201 @@ strands_robots/ │ ├── policy.py # LerobotLocalPolicy (RTC support) │ ├── processor.py # ProcessorBridge (pre/post pipelines) │ └── resolution.py # Policy class resolution (v0.4/v0.5) -├── registry/ # JSON registry for policy discovery -├── tools/ # Strands @tool functions -│ ├── gr00t_inference.py # GR00T inference tool -│ ├── lerobot_calibrate.py -│ ├── lerobot_camera.py -│ ├── lerobot_teleoperate.py -│ ├── pose_tool.py -│ └── serial_tool.py -├── robot.py # Core Robot class -└── utils.py # Shared utilities (require_optional, etc.) 
- -tests/ # Unit tests (run with: hatch run test) -tests_integ/ # Integration tests (run with: hatch run test-integ) +│ +├── simulation/ # Simulation backends +│ ├── base.py # SimulationBackend ABC +│ ├── factory.py # create_simulation() dispatch +│ ├── models.py # SimWorld, SimRobot, SimObject, SimCamera dataclasses +│ ├── model_registry.py # URDF/MJCF path resolution +│ └── mujoco/ # MuJoCo backend (primary) +│ ├── simulation.py # Simulation(AgentTool) — 35 actions via NL +│ ├── backend.py # _ensure_mujoco() lazy loader +│ ├── mjcf_builder.py# Procedural MJCF XML generation +│ ├── policy_runner.py # run_policy, eval_policy, replay_episode +│ ├── recording.py # start/stop_recording → LeRobot dataset +│ ├── rendering.py # RGB + depth offscreen rendering +│ ├── randomization.py # Domain randomization (colors, physics, lighting) +│ ├── scene_ops.py # Inject/eject objects & cameras into live scenes +│ └── tool_spec.json # AgentTool JSON schema (35 actions) +│ +├── assets/ # Robot asset manager +│ ├── __init__.py # resolve_model_path(), list_available_robots() +│ └── download.py # Auto-download from MuJoCo Menagerie +│ +├── dataset_recorder.py # DatasetRecorder → LeRobot v3 parquet + video +│ +├── registry/ # JSON registry for robots + policies +│ ├── robots.json # 38 robots, 120+ aliases, asset paths +│ ├── robots.py # get_robot(), list_robots(), resolve_name() +│ ├── policies.json # Policy provider registry +│ ├── policies.py # get_policy_info(), list_policies() +│ └── loader.py # JSON loader utilities +│ +└── tools/ # Strands @tool functions (for Agent use) + ├── download_assets.py # Download robot meshes from Menagerie/GitHub + ├── gr00t_inference.py # GR00T inference tool + ├── lerobot_calibrate.py + ├── lerobot_camera.py + ├── lerobot_teleoperate.py + ├── pose_tool.py + └── serial_tool.py + +tests/ # Unit tests +tests_integ/ # Integration tests (GPU + model weights) +``` + +## Setup & Development + +### Using uv (recommended) + +```bash +# Clone and enter +git 
clone git@github.com:strands-labs/robots.git && cd robots + +# Create env + install dev deps (uses .python-version=3.12 + uv.lock) +uv sync --extra dev + +# Install with simulation support +uv sync --extra sim --extra dev + +# Install with everything (sim + lerobot + groot) +uv sync --extra all --extra dev + +# Or one-shot editable install +uv pip install -e ".[all,dev]" +``` + +> **Note**: Python >=3.12 is required (enforced by `requires-python` and `.python-version`). +> `uv.lock` is committed — all contributors get identical dependency versions. + +### Optional extras + +| Extra | What it installs | When you need it | +|-------|-----------------|------------------| +| `sim` | `mujoco`, `robot_descriptions`, `opencv`, `Pillow` | Simulation (MuJoCo) | +| `lerobot` | `lerobot>=0.5` | LeRobot policy inference + dataset recording | +| `groot-service` | `pyzmq`, `msgpack` | NVIDIA GR00T inference | +| `all` | All of the above | Full development | +| `dev` | `sim` + `pytest`, `ruff`, `mypy` | Running tests + linting | + +## Testing + +### Run unit tests + +```bash +# All unit tests (34 tests, ~1s) +uv run pytest tests/ -v + +# Specific test files +uv run pytest tests/test_factory.py -v # 22 tests — Robot factory, registry, mode detection +uv run pytest tests/test_mujoco_e2e.py -v # 12 tests — MuJoCo physics, rendering, policy loop +uv run pytest tests/test_registry.py -v # Registry resolution, aliases +uv run pytest tests/test_policies.py -v # Policy creation, mock policy +uv run pytest tests/test_utils.py -v # Utility functions ``` -## Development +### Run integration tests (needs GPU + model weights) ```bash -# Install with all optional deps -pip install -e ".[all,dev]" +uv run pytest tests_integ/ -v --timeout=300 +``` + +### What the tests cover + +**`test_factory.py`** (22 tests): +- Name resolution: canonical, alias, case-insensitive, hyphen-to-underscore +- `list_robots(mode=)`: all, sim, real, both — verifies registry filtering +- Robot registry: so100 exists, 
all aliases valid, robot count, descriptions +- Auto-detect mode: defaults to sim, env override (`STRANDS_ROBOT_MODE`), case-insensitive +- Robot factory: `Robot()` is callable (AgentTool), unknown backend raises, newton raises NotImplementedError +- URDF path passthrough: `Robot("so100", urdf_path="/custom/path.xml")` +- Top-level import: `from strands_robots import Robot` + +**`test_mujoco_e2e.py`** (12 tests): +- Simulation ABC: all required methods exist on base class +- Shared dataclasses: SimWorld, SimRobot, SimObject, SimCamera, TrajectoryStep +- Physics: step advances time, position actuators move joints, contacts detected, reset zeros time +- Rendering: RGB frames (H×W×3 uint8), depth frames (H×W float32) +- Mock policy loop: generates actions, full observe→act loop, loop with rendering +- Domain randomization: color randomization changes model properties + +### Manual E2E validation + +```bash +# Quick smoke test — Robot → MuJoCo → physics +uv run python3 -c " +from strands_robots import Robot +sim = Robot('unitree_g1') +print(sim.get_state()['content'][0]['text']) +sim.step(n_steps=100) +sim.render(width=320, height=240) +sim.destroy() +print('✅ MuJoCo E2E works') +" + +# Full Agent integration — natural language → simulation +uv run python3 -c " +from strands_robots import Robot +from strands import Agent +robot = Robot('so100') +agent = Agent(tools=[robot]) +result = agent('Get the simulation state and run mock policy for 1 second in fast mode on so100') +print(result) +robot.destroy() +" + +# Policy + video recording +uv run python3 -c " +from strands_robots import Robot +sim = Robot('so100') +result = sim.run_policy( + robot_name='so100', + policy_provider='mock', + instruction='pick up the red cube', + duration=2.0, + fast_mode=True, + record_video='/tmp/so100_demo.mp4', + video_fps=30, +) +print(result['content'][0]['text']) +sim.destroy() +" + +# Dataset recording (LeRobot v3 format) +uv run python3 -c " +from strands_robots import Robot +sim = 
Robot('so100') +sim.start_recording(repo_id='local/demo', task='pick cube', root='/tmp/demo_dataset') +sim.run_policy(robot_name='so100', policy_provider='mock', instruction='pick cube', duration=2.0, fast_mode=True) +sim.stop_recording() +sim.destroy() +# Verify: /tmp/demo_dataset/meta/info.json + data/chunk-000/file-000.parquet +import json +info = json.load(open('/tmp/demo_dataset/meta/info.json')) +print(f'✅ Dataset: {info[\"total_frames\"]} frames, {info[\"total_episodes\"]} episodes') +" +``` + +### Supported robots for simulation + +Any robot in `registry/robots.json` with an `asset` field works. Assets are auto-downloaded from [MuJoCo Menagerie](https://github.com/google-deepmind/mujoco_menagerie) on first use via `robot_descriptions`. + +```bash +# List all robots +uv run python3 -c "from strands_robots import list_robots; [print(r['name']) for r in list_robots(mode='sim')]" +``` + +Key robots tested: `so100`, `unitree_g1` (30 joints), `panda` (Franka), `unitree_h1`, `aloha`. + +## The 5-Line Promise -# Run tests -hatch run test # unit tests -hatch run test-integ # integration tests (needs GPU + model weights) +```python +from strands_robots import Robot +from strands import Agent -# Lint & format -hatch run lint # ruff check, ruff format --check, mypy -hatch run format # ruff check --fix, ruff format +robot = Robot("so100") # → MuJoCo sim, auto-downloads assets +agent = Agent(tools=[robot]) # → 35 simulation actions as AgentTool +agent("pick up the red cube") # → agent orchestrates sim via natural language ``` > **Note**: Hatch uses `uv` as installer (`installer = "uv"` in pyproject.toml) for faster @@ -65,11 +238,11 @@ hatch run format # ruff check --fix, ruff format ## Key Conventions -1. **Python 3.12+** — `requires-python = ">=3.12"` (LeRobot >=0.5.0 requires 3.12) -2. **Dependency bounds** — `>=1.0` deps: cap major. `<1.0` deps: cap minor. E.g. `lerobot>=0.5.0,<0.6.0` +1. 
**Python 3.12+** — `requires-python = ">=3.12"` (LeRobot >=0.5.0 requires 3.12, pinned in `.python-version`) +2. **Dependency bounds** — `>=1.0`: cap major. `<1.0`: cap minor. E.g. `lerobot>=0.5.0,<0.6.0` 3. **`__init__.py` must be thin** — exports only, no logic 4. **Imports at file top** — unless lazy-loading heavy deps with documented reason -5. **Raise on fatal errors** — never warn-and-continue if the system will behave unexpectedly +5. **Raise on fatal errors** — never warn-and-continue if behavior will be wrong 6. **No silent defaults on error** — returning zero-valued actions on failure is forbidden 7. **Use `require_optional()`** — from `strands_robots/utils.py` for all optional deps 8. **Integration tests required** — each policy needs `tests_integ/` tests with real inference @@ -79,7 +252,7 @@ hatch run format # ruff check --fix, ruff format ## PR Workflow 1. Create feature branch from `main` -2. Make changes, run `hatch run format && hatch run lint && hatch run test` +2. Make changes, run `uv run ruff check . && uv run ruff format --check . && uv run pytest tests/ -v` 3. All tests must pass, lint must be clean 4. Open PR from your fork, address all review comments 5. Track follow-up items as issues on the [project board](https://github.com/orgs/strands-labs/projects/2) diff --git a/README.md b/README.md index 0a93a93..282005d 100644 --- a/README.md +++ b/README.md @@ -10,26 +10,41 @@

- Robot Control for Strands Agents + Robot Control & Simulation for Strands Agents

PyPI Version GitHub stars License + MuJoCo GR00T LeRobot

Strands Docs + ◆ MuJoCoNVIDIA GR00TLeRobotJetson Containers

-Control robots with natural language through [Strands Agents](https://github.com/strands-agents/sdk-python). Integrates [NVIDIA Isaac GR00T](https://github.com/NVIDIA/Isaac-GR00T) for vision-language-action policies and [LeRobot](https://github.com/huggingface/lerobot) for universal robot support. +Control and simulate robots with natural language through [Strands Agents](https://github.com/strands-agents/sdk-python). Simulate 38 robots in MuJoCo, run policies, record LeRobot datasets, and deploy to real hardware — all from the same API. + +## The 5-Line Promise + +```python +from strands_robots import Robot +from strands import Agent + +robot = Robot("so100") # MuJoCo sim, auto-downloads assets +agent = Agent(tools=[robot]) # 35 simulation actions as AgentTool +agent("Pick up the red cube") # Agent orchestrates sim via natural language +``` + +That's it. `Robot("so100")` auto-detects simulation mode, downloads the MJCF model from [MuJoCo Menagerie](https://github.com/google-deepmind/mujoco_menagerie), builds a physics scene with ground plane and lighting, and exposes 35 actions (step, render, run_policy, record, randomize, ...) as a Strands AgentTool. ## How It Works @@ -37,11 +52,14 @@ Control robots with natural language through [Strands Agents](https://github.com graph LR A[Natural Language
'Pick up the red block'] --> B[Strands Agent] B --> C[Robot Tool] - C --> D[Policy Provider
GR00T/Mock] - C --> E[LeRobot
Hardware Abstraction] - D --> F[Action Chunk
16 timesteps] - F --> E - E --> G[Robot Hardware
SO-101/GR-1/G1] + C --> D{Mode?} + D -->|Simulation| E[MuJoCo Backend
35 actions] + D -->|Hardware| F[LeRobot
Real Robot] + E --> G[Policy Provider
Mock / GR00T / LeRobot] + F --> G + G --> H[Action Chunks
Joint positions] + H --> E + H --> F classDef input fill:#2ea44f,stroke:#1b7735,color:#fff classDef agent fill:#0969da,stroke:#044289,color:#fff @@ -49,79 +67,118 @@ graph LR classDef hardware fill:#bf8700,stroke:#875e00,color:#fff class A input - class B,C agent - class D,F policy - class E,G hardware + class B,C,D agent + class E,F hardware + class G,H policy ``` -## Architecture +## Installation -```mermaid -flowchart TB - subgraph Agent["🤖 Strands Agent"] - NL[Natural Language Input] - Tools[Tool Registry] - end - - subgraph RobotTool["🦾 Robot Tool"] - direction TB - RT[Robot Class] - TM[Task Manager] - AS[Async Executor] - end - - subgraph Policy["🧠 Policy Layer"] - direction TB - PA[Policy Abstraction] - GP[GR00T Policy] - MP[Mock Policy] - CP[Custom Policy] - end - - subgraph Inference["⚡ Inference Service"] - direction TB - DC[Docker Container] - ZMQ[ZMQ Server :5555] - TRT[TensorRT Engine] - end - - subgraph Hardware["🔧 Hardware Layer"] - direction TB - LR[LeRobot] - CAM[Cameras] - SERVO[Feetech Servos] - end - - NL --> Tools - Tools --> RT - RT --> TM - TM --> AS - AS --> PA - PA --> GP - PA --> MP - PA --> CP - GP --> ZMQ - ZMQ --> TRT - TRT --> DC - AS --> LR - LR --> CAM - LR --> SERVO - - classDef agentStyle fill:#0969da,stroke:#044289,color:#fff - classDef robotStyle fill:#2ea44f,stroke:#1b7735,color:#fff - classDef policyStyle fill:#8250df,stroke:#5a32a3,color:#fff - classDef infraStyle fill:#bf8700,stroke:#875e00,color:#fff - classDef hwStyle fill:#d73a49,stroke:#a72b3a,color:#fff - - class NL,Tools agentStyle - class RT,TM,AS robotStyle - class PA,GP,MP,CP policyStyle - class DC,ZMQ,TRT infraStyle - class LR,CAM,SERVO hwStyle +```bash +pip install strands-robots +``` + +### With simulation (MuJoCo) + +```bash +pip install "strands-robots[sim]" +``` + +### With everything + +```bash +pip install "strands-robots[all]" ``` +| Extra | What it adds | When you need it | +|-------|-------------|------------------| +| `sim` | `mujoco`, 
`robot_descriptions`, `opencv`, `Pillow` | Simulation | +| `lerobot` | `lerobot>=0.5` | LeRobot policy inference + dataset recording | +| `groot-service` | `pyzmq`, `msgpack` | NVIDIA GR00T inference | +| `all` | All of the above | Full development | + ## Quick Start +### Simulation (no hardware needed) + +```python +from strands_robots import Robot + +# Create simulation — auto-downloads robot model +sim = Robot("unitree_g1") + +# Step physics +sim.step(n_steps=100) + +# Render a frame +sim.render(width=640, height=480, save_path="/tmp/frame.png") + +# Run a policy +sim.run_policy( + robot_name="unitree_g1", + policy_provider="mock", + instruction="walk forward", + duration=5.0, + record_video="/tmp/g1_walk.mp4", +) + +sim.destroy() +``` + +### Agent-Driven Simulation + +```python +from strands_robots import Robot +from strands import Agent + +robot = Robot("so100") +agent = Agent(tools=[robot]) + +# The agent figures out the tool calls +agent(""" +1. Add a red box at [0.3, 0, 0.05] +2. Run mock policy for 3 seconds to pick it up +3. Record video to /tmp/demo.mp4 +4. 
Show me the final state +""") +``` + +### Dataset Recording (LeRobot v3 format) + +```python +from strands_robots import Robot + +sim = Robot("so100") + +# Start recording to LeRobot dataset +sim.start_recording( + repo_id="my-org/so100-pick-cube", + task="pick up the red cube", + fps=30, + root="/tmp/my_dataset", +) + +# Run policy — frames auto-captured +sim.run_policy( + robot_name="so100", + policy_provider="mock", + instruction="pick up the red cube", + duration=5.0, + fast_mode=True, +) + +# Save episode +sim.stop_recording() +sim.destroy() + +# Output: /tmp/my_dataset/ +# meta/info.json — LeRobot v3 metadata +# meta/tasks.parquet — task descriptions +# data/chunk-000/ — observation.state + action parquet +``` + +### Real Hardware + ```python from strands import Agent from strands_robots import Robot, gr00t_inference @@ -132,150 +189,156 @@ robot = Robot( robot="so101_follower", cameras={ "front": {"type": "opencv", "index_or_path": "/dev/video0", "fps": 30}, - "wrist": {"type": "opencv", "index_or_path": "/dev/video2", "fps": 30} + "wrist": {"type": "opencv", "index_or_path": "/dev/video2", "fps": 30}, }, port="/dev/ttyACM0", - data_config="so100_dualcam" + data_config="so100_dualcam", ) -# Create agent with robot tool agent = Agent(tools=[robot, gr00t_inference]) -# Start GR00T inference service +# Start GR00T inference agent.tool.gr00t_inference( action="start", checkpoint_path="/data/checkpoints/model", port=8000, - data_config="so100_dualcam" + data_config="so100_dualcam", ) -# Control robot with natural language +# Natural language control agent("Use my_arm to pick up the red block using GR00T policy on port 8000") ``` -## Installation +## Architecture -```bash -pip install strands-robots +``` + ┌──────────────────────────┐ + │ Strands Agent │ + │ (natural language in) │ + └──────────┬───────────────┘ + │ + ┌──────────▼───────────────┐ + │ Robot Factory │ + │ Robot("so100") dispatches│ + └──────┬──────────┬────────┘ + │ │ + ┌────────────▼──┐ 
┌────▼────────────┐ + │ Simulation │ │ HardwareRobot │ + │ (MuJoCo) │ │ (LeRobot) │ + │ 35 actions │ │ real servos │ + └──────┬────────┘ └────┬─────────────┘ + │ │ + ┌──────▼────────────────▼────────┐ + │ Policy Layer │ + │ mock │ groot │ lerobot_local │ + └──────────────┬─────────────────┘ + │ + ┌──────────────▼─────────────────┐ + │ Dataset Recorder │ + │ LeRobot v3 parquet + video │ + └────────────────────────────────┘ ``` -From source: +## Simulation Features -```bash -git clone https://github.com/strands-labs/robots -cd robots -pip install -e . -``` +The MuJoCo simulation backend exposes **35 actions** as a Strands AgentTool: -
-🐳 Jetson Container Setup (Required for GR00T Inference) +| Category | Actions | +|----------|---------| +| **World** | `create_world`, `load_scene`, `reset`, `get_state`, `destroy` | +| **Robots** | `add_robot`, `remove_robot`, `list_robots`, `get_robot_state` | +| **Objects** | `add_object`, `remove_object`, `move_object`, `list_objects` | +| **Cameras** | `add_camera`, `remove_camera` | +| **Policies** | `run_policy`, `start_policy`, `stop_policy`, `eval_policy`, `replay_episode` | +| **Rendering** | `render`, `render_depth`, `open_viewer`, `close_viewer` | +| **Physics** | `step`, `set_gravity`, `set_timestep`, `get_contacts` | +| **Recording** | `start_recording`, `stop_recording`, `get_recording_status` | +| **Randomization** | `randomize` (colors, physics, lighting, cameras) | +| **Assets** | `list_urdfs`, `register_urdf`, `get_features` | -GR00T inference requires the Isaac-GR00T Docker container on Jetson platforms: +### Supported Robots (38 robots, 120+ aliases) -```bash -# Clone jetson-containers -git clone https://github.com/dusty-nv/jetson-containers -cd jetson-containers +Any robot in the registry works in simulation. Assets auto-download from MuJoCo Menagerie on first use. -# Run Isaac GR00T container (background) -jetson-containers run $(autotag isaac-gr00t) & +```python +from strands_robots import list_robots -# Container exposes inference service on port 5555 (ZMQ) or 8000 (HTTP) +# List all simulation-capable robots +for r in list_robots(): + print(f"{r['name']}: {r['description']}") ``` -**Tested Hardware:** -- NVIDIA Thor Dev Kit (Jetpack 7.0) -- NVIDIA Jetson AGX Orin (Jetpack 6.x) +**Key robots tested**: `so100` (6-DOF arm), `unitree_g1` (30 joints), `panda` (Franka), `unitree_h1` (humanoid), `aloha` (bimanual). -See [Jetson Deployment Guide](https://github.com/NVIDIA/Isaac-GR00T/blob/main/deployment_scripts/README.md) for TensorRT optimization. +### Domain Randomization -
+```python +sim.randomize( + target="colors", # or "physics", "lighting", "camera", "all" + robot_name="so100", +) +``` -## Robot Control Flow +### Policy Evaluation -```mermaid -sequenceDiagram - participant User - participant Agent as Strands Agent - participant Robot as Robot Tool - participant Policy as GR00T Policy - participant HW as Hardware - - User->>Agent: "Pick up the red block" - Agent->>Robot: execute(instruction, policy_port) - - loop Control Loop @ 50Hz - Robot->>HW: get_observation() - HW-->>Robot: {cameras, joint_states} - Robot->>Policy: get_actions(obs, instruction) - Policy-->>Robot: action_chunk[16] - - loop Action Horizon - Robot->>HW: send_action(action) - Note over Robot,HW: 20ms sleep (50Hz) - end - end - - Robot-->>Agent: Task completed - Agent-->>User: "✅ Picked up red block" +```python +result = sim.eval_policy( + robot_name="so100", + policy_provider="mock", + instruction="pick up the cube", + num_episodes=10, + max_steps_per_episode=200, +) +# Returns success rate, mean reward, per-episode stats ``` -## Tools Reference +## Policy Providers -### Robot Tool +| Provider | Description | Requirements | +|----------|-------------|-------------| +| `mock` | Sinusoidal test actions | None | +| `groot` | NVIDIA GR00T N1.5/N1.6 | `[groot-service]` + inference container | +| `lerobot_local` | HuggingFace LeRobot direct inference | `[lerobot]` + model weights | -The `Robot` class is a Strands AgentTool that provides async robot control with real-time status reporting. 
+```python +from strands_robots.policies.factory import create_policy -| Action | Parameters | Description | Example | -|--------|------------|-------------|---------| -| `execute` | `instruction`, `policy_port`, `duration` | Blocking execution until complete | `"Pick up the cube"` | -| `start` | `instruction`, `policy_port`, `duration` | Non-blocking async start | `"Wave your arm"` | -| `status` | - | Get current task status | Check progress | -| `stop` | - | Interrupt running task | Emergency stop | +# Mock (for testing — no deps) +policy = create_policy(provider="mock") -**Natural Language Examples:** +# GR00T (requires inference server) +policy = create_policy(provider="groot", host="localhost", port=8000, data_config="so100_dualcam") -```python -# Blocking execution (waits for completion) -agent("Use my_arm to pick up the red block using GR00T policy on port 8000") +# LeRobot local (direct inference) +policy = create_policy(provider="lerobot_local", policy_path="lerobot/act_so100_pick") +``` -# Async execution (returns immediately) -agent("Start my_arm waving using GR00T on port 8000, then check status") +## Tools Reference -# Stop running task -agent("Stop my_arm immediately") -``` +### Robot Tool (Simulation Mode) -
-Robot Constructor Parameters - -| Parameter | Type | Default | Description | -|-----------|------|---------|-------------| -| `tool_name` | `str` | required | Name for this robot tool | -| `robot` | `str\|RobotConfig` | required | Robot type or config | -| `cameras` | `Dict` | `None` | Camera configuration | -| `port` | `str` | `None` | Serial port for robot | -| `data_config` | `str` | `None` | GR00T data config name | -| `control_frequency` | `float` | `50.0` | Control loop Hz | -| `action_horizon` | `int` | `8` | Actions per inference | +When `Robot("name")` detects simulation mode, it creates a MuJoCo `Simulation` with 35 actions accessible via natural language or direct calls. -
+### Robot Tool (Hardware Mode) ---- +| Action | Description | +|--------|-------------| +| `execute` | Blocking policy execution until complete | +| `start` | Non-blocking async start | +| `status` | Get current task status | +| `stop` | Emergency stop | ### GR00T Inference Tool -Manages GR00T policy inference services running in Docker containers. - -| Action | Parameters | Description | Example | -|--------|------------|-------------|---------| -| `start` | `checkpoint_path`, `port`, `data_config` | Start inference service | `"Start GR00T on port 8000"` | -| `stop` | `port` | Stop service on port | `"Stop GR00T on port 8000"` | -| `status` | `port` | Check service status | `"Is GR00T running?"` | -| `list` | - | List all running services | `"List inference services"` | -| `find_containers` | - | Find GR00T containers | `"Find available containers"` | +| Action | Description | +|--------|-------------| +| `start` | Start GR00T inference service (Docker) | +| `stop` | Stop inference service | +| `status` | Check service health | +| `list` | List running services | -**TensorRT Acceleration:** +
+TensorRT Acceleration ```python agent.tool.gr00t_inference( @@ -284,208 +347,77 @@ agent.tool.gr00t_inference( port=8000, use_tensorrt=True, trt_engine_path="gr00t_engine", - vit_dtype="fp8", # ViT: fp16 or fp8 - llm_dtype="nvfp4", # LLM: fp16, nvfp4, or fp8 - dit_dtype="fp8" # DiT: fp16 or fp8 + vit_dtype="fp8", + llm_dtype="nvfp4", + dit_dtype="fp8", ) ``` ---- - -### Camera Tool - -LeRobot-based camera management with OpenCV and RealSense support. - -| Action | Parameters | Description | Example | -|--------|------------|-------------|---------| -| `discover` | - | Find all cameras | `"Discover cameras"` | -| `capture` | `camera_id`, `save_path` | Single image capture | `"Capture from /dev/video0"` | -| `capture_batch` | `camera_ids`, `async_mode` | Multi-camera capture | `"Capture from all cameras"` | -| `record` | `camera_id`, `capture_duration` | Record video | `"Record 10s video"` | -| `preview` | `camera_id`, `preview_duration` | Live preview | `"Preview camera 0"` | -| `test` | `camera_id` | Performance test | `"Test camera speed"` | - ---- - -### Serial Tool - -Low-level serial communication for Feetech servos and custom protocols. - -| Action | Parameters | Description | Example | -|--------|------------|-------------|---------| -| `list_ports` | - | Discover serial ports | `"List serial ports"` | -| `feetech_position` | `port`, `motor_id`, `position` | Move servo | `"Move motor 1 to center"` | -| `feetech_ping` | `port`, `motor_id` | Ping servo | `"Ping motor 1"` | -| `send` | `port`, `data/hex_data` | Send raw data | `"Send FF FF to robot"` | -| `monitor` | `port` | Monitor serial data | `"Monitor /dev/ttyACM0"` | - ---- - -### Teleoperation Tool - -Record demonstrations for imitation learning with LeRobot. 
- -| Action | Parameters | Description | Example | -|--------|------------|-------------|---------| -| `start` | `robot_type`, `teleop_type` | Start teleoperation | `"Start teleoperation"` | -| `stop` | `session_name` | Stop session | `"Stop recording"` | -| `list` | - | List active sessions | `"List teleop sessions"` | -| `replay` | `dataset_repo_id`, `replay_episode` | Replay episode | `"Replay episode 5"` | - ---- - -### Pose Tool - -Store, retrieve, and execute named robot poses. - -| Action | Parameters | Description | Example | -|--------|------------|-------------|---------| -| `store_pose` | `pose_name` | Save current position | `"Save as 'home'"` | -| `load_pose` | `pose_name` | Move to saved pose | `"Go to home pose"` | -| `list_poses` | - | List all poses | `"List saved poses"` | -| `move_motor` | `motor_name`, `position` | Move single motor | `"Move gripper to 50%"` | -| `incremental_move` | `motor_name`, `delta` | Small movement | `"Move elbow +5°"` | -| `reset_to_home` | - | Safe home position | `"Reset to home"` | - ---- +
-## Supported Robots +### Additional Tools -| Robot | Config | Cameras | Description | -|-------|--------|---------|-------------| -| SO-100/SO-101 | `so100`, `so100_dualcam`, `so100_4cam` | 1-4 | Single arm desktop robot | -| Fourier GR-1 | `fourier_gr1_arms_only` | 1 | Bimanual humanoid arms | -| Bimanual Panda | `bimanual_panda_gripper` | 3 | Dual Franka Emika arms | -| Unitree G1 | `unitree_g1` | 1 | Humanoid robot platform | +| Tool | Description | +|------|-------------| +| `lerobot_camera` | Camera discovery, capture, recording (OpenCV + RealSense) | +| `lerobot_calibrate` | Motor calibration management | +| `lerobot_teleoperate` | Record demonstrations for imitation learning | +| `pose_tool` | Store, retrieve, execute named robot poses | +| `serial_tool` | Low-level Feetech servo communication |
-GR00T Data Configurations - -| Config | Video Keys | State Keys | Description | -|--------|------------|------------|-------------| -| `so100` | `video.webcam` | `state.single_arm`, `state.gripper` | Single camera | -| `so100_dualcam` | `video.front`, `video.wrist` | `state.single_arm`, `state.gripper` | Front + wrist | -| `so100_4cam` | `video.front`, `video.wrist`, `video.top`, `video.side` | `state.single_arm`, `state.gripper` | Quad camera | -| `fourier_gr1_arms_only` | `video.ego_view` | `state.left_arm`, `state.right_arm`, `state.left_hand`, `state.right_hand` | Humanoid arms | -| `bimanual_panda_gripper` | `video.right_wrist_view`, `video.left_wrist_view`, `video.front_view` | EEF pos/quat + gripper | Dual arm EEF | -| `unitree_g1` | `video.rs_view` | `state.left_arm`, `state.right_arm`, `state.left_hand`, `state.right_hand` | G1 humanoid | +🐳 Jetson Container Setup (for GR00T Inference) -
- -## Policy Providers +GR00T inference requires the Isaac-GR00T Docker container on Jetson platforms: -```mermaid -classDiagram - class Policy { - <> - +get_actions(observation, instruction) - +set_robot_state_keys(keys) - +provider_name - } - - class Gr00tPolicy { - +data_config - +policy_client: ZMQ - +get_actions() - } - - class MockPolicy { - +get_actions() - Returns random actions - } - - class CustomPolicy { - +get_actions() - Your implementation - } - - Policy <|-- Gr00tPolicy - Policy <|-- MockPolicy - Policy <|-- CustomPolicy +```bash +git clone https://github.com/dusty-nv/jetson-containers +cd jetson-containers +jetson-containers run $(autotag isaac-gr00t) & ``` -```python -from strands_robots import create_policy - -# GR00T policy (requires inference server) -policy = create_policy( - provider="groot", - data_config="so100_dualcam", - host="localhost", - port=8000 -) +**Tested Hardware:** +- NVIDIA Thor Dev Kit (Jetpack 7.0) +- NVIDIA Jetson AGX Orin (Jetpack 6.x) -# Mock policy (for testing) -policy = create_policy(provider="mock") -``` +See [Jetson Deployment Guide](https://github.com/NVIDIA/Isaac-GR00T/blob/main/deployment_scripts/README.md) for TensorRT optimization. 
-## Project Structure + -``` -strands-robots/ -├── strands_robots/ -│ ├── __init__.py # Package exports -│ ├── robot.py # Universal Robot class (AgentTool) -│ ├── policies/ -│ │ ├── __init__.py # Policy ABC + factory -│ │ └── groot/ -│ │ ├── __init__.py # Gr00tPolicy implementation -│ │ ├── client.py # ZMQ inference client -│ │ └── data_config.py # Robot embodiment configurations -│ └── tools/ -│ ├── gr00t_inference.py # Docker service manager -│ ├── lerobot_camera.py # Camera operations -│ ├── lerobot_calibrate.py # Calibration management -│ ├── lerobot_teleoperate.py # Recording/replay -│ ├── pose_tool.py # Pose management -│ └── serial_tool.py # Serial communication -├── test.py # Integration example -└── pyproject.toml # Package configuration -``` +## GR00T Data Configurations -## Example: Complete Workflow +| Config | Video Keys | Description | +|--------|------------|-------------| +| `so100` | `video.webcam` | Single camera setup | +| `so100_dualcam` | `video.front`, `video.wrist` | Front + wrist cameras | +| `so100_4cam` | `video.front`, `video.wrist`, `video.top`, `video.side` | Quad camera | +| `fourier_gr1_arms_only` | `video.ego_view` | Humanoid bimanual arms | +| `bimanual_panda_gripper` | 3 camera views | Dual Franka Emika arms | +| `unitree_g1` | `video.rs_view` | G1 humanoid platform | -```python -#!/usr/bin/env python3 -from strands import Agent -from strands_robots import Robot, gr00t_inference, lerobot_camera, pose_tool +## Development -# 1. Create robot with dual cameras -robot = Robot( - tool_name="orange_arm", - robot="so101_follower", - cameras={ - "wrist": {"type": "opencv", "index_or_path": "/dev/video0", "fps": 15}, - "front": {"type": "opencv", "index_or_path": "/dev/video2", "fps": 15}, - }, - port="/dev/ttyACM0", - data_config="so100_dualcam", -) +```bash +git clone https://github.com/strands-labs/robots +cd robots -# 2. 
Create agent with all robot tools -agent = Agent( - tools=[robot, gr00t_inference, lerobot_camera, pose_tool] -) +# Create environment +uv venv --python 3.12 .venv +source .venv/bin/activate -# 3. Start inference service -agent.tool.gr00t_inference( - action="start", - checkpoint_path="/data/checkpoints/gr00t-wave/checkpoint-300000", - port=8000, - data_config="so100_dualcam", -) +# Install with simulation + dev tools +uv pip install -e ".[sim,dev]" -# 4. Interactive control loop -while True: - user_input = input("\n🤖 > ") - if user_input.lower() in ["exit", "quit"]: - break - agent(user_input) +# Run tests (34 tests, ~1s) +uv run pytest tests/ -v -# 5. Cleanup -agent.tool.gr00t_inference(action="stop", port=8000) +# Lint +uv run ruff check . +uv run ruff format --check . ``` +See [AGENTS.md](AGENTS.md) for detailed testing guide, manual E2E validation scripts, and contribution workflow. + ## Configuration ### Environment Variables @@ -514,18 +446,19 @@ To change the cache location: `export STRANDS_ASSETS_DIR=/path/to/custom/dir` ## Contributing We welcome contributions! Please see: +- [AGENTS.md](AGENTS.md) for development guidelines - [GitHub Issues](https://github.com/strands-labs/robots/issues) for bug reports - [Pull Requests](https://github.com/strands-labs/robots/pulls) for contributions +- [Project Board](https://github.com/orgs/strands-labs/projects/2) for planned work ## License -Apache-2.0 - see [LICENSE](LICENSE) file. - -## Links +Apache-2.0 — see [LICENSE](LICENSE).
GitHub · PyPI + ◆ MuJoCo · NVIDIA GR00T · LeRobot · Strands Docs
+ +Requirements: + pip install strands-agents strands-robots[sim] + +Usage: + python examples/02_sim_agent.py +""" + +from strands import Agent + +from strands_robots import Robot + +# Factory creates a MuJoCo sim (auto-downloads assets on first run) +robot = Robot("so100") + +# The sim IS the tool — pass it directly to Agent +agent = Agent(tools=[robot]) + +# Natural language → simulation actions +result = agent("Get the simulation state, then run a mock policy for 1 second in fast mode") +print(result) + +robot.destroy() diff --git a/examples/03_sim_recording.py b/examples/03_sim_recording.py new file mode 100644 index 0000000..05e1aaf --- /dev/null +++ b/examples/03_sim_recording.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 +"""Record simulation rollouts as LeRobot v3 datasets. + +Runs a mock policy in MuJoCo, captures joint states + video, and saves +everything as a LeRobot-compatible dataset (parquet + AV1 video). + +Requirements: + pip install strands-robots[sim] + +Usage: + python examples/03_sim_recording.py +""" + +from strands_robots import Robot + +sim = Robot("so100") + +# Start recording — creates LeRobot v3 dataset structure +sim.start_recording( + repo_id="local/so100_demo", + task="reach target", + fps=50, + root="/tmp/so100_dataset", +) + +# Run a mock policy (random actions) for 2 seconds +result = sim.run_policy( + robot_name="so100", + policy_provider="mock", + instruction="reach target", + duration=2.0, + fast_mode=True, + record_video="/tmp/so100_rollout.mp4", + video_fps=30, +) +print(result["content"][0]["text"]) + +# Finalize the episode +stop = sim.stop_recording() +print(stop["content"][0]["text"]) + +sim.destroy() +print("✅ Dataset saved to /tmp/so100_dataset/") diff --git a/examples/04_real_hardware.py b/examples/04_real_hardware.py new file mode 100644 index 0000000..a17a38a --- /dev/null +++ b/examples/04_real_hardware.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +"""Control real robot hardware with the same factory API. 
+ +Robot(mode="real") returns a HardwareRobot backed by LeRobot. The same +Agent workflow works — just swap the mode. + +Requirements: + pip install strands-agents strands-robots[lerobot] + # Hardware: SO-100/SO-101 arm connected via USB (Feetech servos) + +Usage: + # Auto-detect (switches to real if USB servo controller found) + STRANDS_ROBOT_MODE=real python examples/04_real_hardware.py + + # Or set mode explicitly in code (see below) +""" + +from strands import Agent + +from strands_robots import Robot + +# Explicit real mode with camera config +robot = Robot( + "so100", + mode="real", + cameras={ + "wrist": { + "type": "opencv", + "index_or_path": "/dev/video0", + "fps": 15, + "fourcc": "MJPG", + }, + }, +) + +# Same Agent interface as simulation +agent = Agent(tools=[robot]) +agent("Connect to the robot, read the current joint positions, and report status") diff --git a/examples/05_real_groot_policy.py b/examples/05_real_groot_policy.py new file mode 100644 index 0000000..be38128 --- /dev/null +++ b/examples/05_real_groot_policy.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +"""Run NVIDIA GR00T policy on real hardware. + +Starts a GR00T inference server, connects to a real SO-101 arm with +dual cameras, and runs the policy through an Agent. 
+ +Requirements: + pip install strands-agents strands-robots[all] + # Hardware: SO-101 arm + 2 USB cameras + # Model: Download from HuggingFace (e.g., cagataydev/gr00t-wave) + +Usage: + python examples/05_real_groot_policy.py +""" + +from strands import Agent + +from strands_robots import Robot, gr00t_inference, lerobot_camera, pose_tool + +# Real robot with dual cameras +robot = Robot( + "so101", + mode="real", + cameras={ + "wrist": { + "type": "opencv", + "index_or_path": "/dev/video0", + "fps": 15, + "fourcc": "MJPG", + }, + "front": { + "type": "opencv", + "index_or_path": "/dev/video2", + "fps": 15, + "fourcc": "MJPG", + }, + }, +) + +# Build agent with robot + inference tools +agent = Agent( + tools=[robot, gr00t_inference, lerobot_camera, pose_tool], +) + +# Start GR00T inference server +agent.tool.gr00t_inference( + action="start", + checkpoint_path="/data/checkpoints/gr00t-wave/checkpoint-300000", + port=5555, + data_config="so100_dualcam", + embodiment_tag="new_embodiment", +) + +# Interactive control loop +print("GR00T policy running. Type instructions or 'quit' to exit.") +while True: + query = input("\n# ") + if query.lower() in ("quit", "exit", "q"): + break + agent(query) + +agent.tool.gr00t_inference(action="stop", port=5555) diff --git a/examples/06_list_robots.py b/examples/06_list_robots.py new file mode 100644 index 0000000..4066514 --- /dev/null +++ b/examples/06_list_robots.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +"""List all supported robots and their capabilities. + +The registry contains 38+ robots with simulation assets and/or hardware +support. Use list_robots() to discover what's available. 
+ +Requirements: + pip install strands-robots + +Usage: + python examples/06_list_robots.py +""" + +from strands_robots import list_robots + +print("=== All Robots ===") +for r in list_robots(mode="all"): + sim = "🎮" if r.get("has_sim") else " " + real = "🔧" if r.get("has_real") else " " + print(f" {sim} {real} {r['name']:25s} {r.get('description', '')}") + +print(f"\n=== Sim-only ({len(list_robots(mode='sim'))} robots) ===") +for r in list_robots(mode="sim")[:5]: + print(f" {r['name']}") + +print(f"\n=== Real hardware ({len(list_robots(mode='real'))} robots) ===") +for r in list_robots(mode="real"): + print(f" {r['name']}") diff --git a/examples/act_policy_simulation.py b/examples/act_policy_simulation.py new file mode 100755 index 0000000..99fc32a --- /dev/null +++ b/examples/act_policy_simulation.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 +"""Run a HuggingFace ACT policy in MuJoCo and export a LeRobot dataset. + +Downloads a pretrained ACT policy, runs it in simulation, records multi-camera +video + joint data as a LeRobot v3 dataset. The full pipeline in ~20 lines. + +Requirements: + pip install strands-robots[sim] lerobot torch + +Usage: + python examples/act_policy_simulation.py +""" + +from strands_robots import Robot + +# 1. Create simulated Aloha bimanual robot (14 actuators, 6 cameras) +sim = Robot("aloha") + +# 2. Start recording a LeRobot dataset (parquet + AV1 video) +sim.start_recording( + repo_id="local/act_aloha_sim_demo", + task="transfer cube", + fps=50, + root="/tmp/act_aloha_dataset", +) + +# 3. Run a pretrained ACT policy from HuggingFace (51M params) +result = sim.run_policy( + robot_name="aloha", + policy_provider="lerobot_local", + pretrained_name_or_path="lerobot/act_aloha_sim_transfer_cube_human", + instruction="transfer cube", + duration=2.0, # seconds of sim time + fast_mode=True, # no wall-clock sleep between steps + record_video="/tmp/act_aloha_rollout.mp4", +) +print(result["content"][0]["text"]) + +# 4. 
Save the episode to disk +stop = sim.stop_recording() +print(stop["content"][0]["text"]) + +sim.destroy() diff --git a/examples/physics_agent.py b/examples/physics_agent.py new file mode 100644 index 0000000..55e1a25 --- /dev/null +++ b/examples/physics_agent.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python3 +"""Strands Agent with full MuJoCo physics introspection. + +Demonstrates the new Physics API — direct Python access to MuJoCo C functions: + mj_ray, mj_jacBody, mj_applyFT, mj_fullM, mj_inverse, mj_contactForce, + mj_getState/mj_setState, mj_energyPos/mj_energyVel, and more. + +An agent that can reason about physics: cast rays, compute Jacobians, +apply forces, checkpoint/restore state, read sensors, and analyze contacts +— all through natural language. + +Requirements: + pip install strands-agents strands-robots[sim] + +Usage: + python examples/physics_agent.py +""" + +from strands import Agent + +from strands_robots import Robot + +# Create a simulated SO-100 robot arm +sim = Robot("so100") + +# Give the agent the simulation tool — all 50+ actions available via NL +agent = Agent( + tools=[sim], + system_prompt=( + "You are a robotics physicist. You have a simulated SO-100 robot arm " + "in MuJoCo. Use the simulation tool to explore its physics. " + "Be concise and use real numbers from the simulation." + ), +) + +# ─── Example 1: Full physics analysis in natural language ──────────────────── +print("=" * 70) +print("Example 1: Agent-driven physics analysis") +print("=" * 70) + +result = agent( + "Analyze the robot's physics: " + "1) Get the total mass breakdown, " + "2) Compute the mass matrix and tell me its condition number, " + "3) Read all sensor values, " + "4) Get the system energy. " + "Summarize the physical properties." 
+) +print(result) + +# ─── Example 2: Raycasting for obstacle detection ─────────────────────────── +print("\n" + "=" * 70) +print("Example 2: Agent uses raycasting for spatial reasoning") +print("=" * 70) + +result = agent( + "Cast rays downward from 5 points above the robot (height=1m) at " + "x=-0.2, -0.1, 0, 0.1, 0.2 (y=0) to map what's below. " + "Use multi_raycast for efficiency. Report the distance map." +) +print(result) + +# ─── Example 3: State checkpointing + force experiments ───────────────────── +print("\n" + "=" * 70) +print("Example 3: Save state → experiment → restore") +print("=" * 70) + +result = agent( + "I want to experiment with forces without breaking the sim: " + "1) Save the current state as 'pristine', " + "2) Step 200 times to let things settle, " + "3) Get the energy, " + "4) Apply a 50N upward force to the robot's end-effector body, " + "5) Step 100 more times, " + "6) Get the energy again and compare, " + "7) Restore the 'pristine' state, " + "8) Verify we're back by checking energy matches the original." +) +print(result) + +# ─── Example 4: Jacobian + inverse dynamics ───────────────────────────────── +print("\n" + "=" * 70) +print("Example 4: Dynamics analysis") +print("=" * 70) + +result = agent( + "Compute the Jacobian for the end-effector and run inverse dynamics. " + "What forces are needed at each joint to hold the current pose?" +) +print(result) + +# ─── Example 5: Contact analysis ──────────────────────────────────────────── +print("\n" + "=" * 70) +print("Example 5: Contact force analysis") +print("=" * 70) + +result = agent( + "Step the simulation 500 times to let everything settle, " + "then get detailed contact forces. " + "Which bodies are in contact and what are the normal forces?" 
+) +print(result) + +# Clean up +sim.destroy() +print("\n✅ Done — all physics examples complete.") From 88427afe1731cb36e23782a99faefbc90ee910a7 Mon Sep 17 00:00:00 2001 From: cagataycali Date: Wed, 1 Apr 2026 15:14:56 -0400 Subject: [PATCH 2/3] fix: add mock fallback comment in ACT example, fix emoji nit - ACT example now documents mock provider as lightweight alternative - Quickstart uses ASCII instead of emoji for terminal compatibility --- examples/01_sim_quickstart.py | 2 +- examples/act_policy_simulation.py | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/examples/01_sim_quickstart.py b/examples/01_sim_quickstart.py index 342b04b..037bbff 100644 --- a/examples/01_sim_quickstart.py +++ b/examples/01_sim_quickstart.py @@ -26,4 +26,4 @@ print(f"Rendered frame: {frame['content'][0]['text']}") sim.destroy() -print("✅ Simulation complete") +print("Done — simulation complete") diff --git a/examples/act_policy_simulation.py b/examples/act_policy_simulation.py index 99fc32a..525bcc1 100755 --- a/examples/act_policy_simulation.py +++ b/examples/act_policy_simulation.py @@ -25,6 +25,9 @@ ) # 3. Run a pretrained ACT policy from HuggingFace (51M params) +# NOTE: This downloads model weights (~200MB) on first run. +# For a lightweight test without downloading, use policy_provider="mock": +# sim.run_policy(robot_name="aloha", policy_provider="mock", duration=2.0) result = sim.run_policy( robot_name="aloha", policy_provider="lerobot_local", From 914cbd7947ca9889fc9121ae2d59b0fac373e81d Mon Sep 17 00:00:00 2001 From: cagataycali Date: Wed, 15 Apr 2026 05:10:52 +0000 Subject: [PATCH 3/3] docs: add behind-the-scenes comment for Robot() in sim agent example Explain what Robot('so100') does internally: mode detection, backend selection, Simulation construction, add_robot() call, and asset auto-download. Addresses yinsong1986 review feedback. 
--- examples/02_sim_agent.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/examples/02_sim_agent.py b/examples/02_sim_agent.py index 95125af..7d506c5 100644 --- a/examples/02_sim_agent.py +++ b/examples/02_sim_agent.py @@ -15,7 +15,11 @@ from strands_robots import Robot -# Factory creates a MuJoCo sim (auto-downloads assets on first run) +# Robot("so100") auto-detects mode="sim", picks the "mujoco" backend, +# constructs a Simulation instance, calls add_robot() to load the SO-100 +# model (auto-downloading URDF/meshes on first run), and returns that +# Simulation as an AgentTool. You get full access to all Simulation +# actions — step(), render(), run_policy(), get_observation(), etc. robot = Robot("so100") # The sim IS the tool — pass it directly to Agent