From a690a409c90534d2ab59b3aff30a3aed7f16e547 Mon Sep 17 00:00:00 2001 From: Neel Jawale Date: Mon, 18 Aug 2025 14:10:00 -0700 Subject: [PATCH 1/6] Porting SkillGen to the public repo --- CONTRIBUTORS.md | 1 + .../isaaclab_mimic/annotate_demos.py | 100 +- .../isaaclab_mimic/generate_dataset.py | 60 +- .../envs/manager_based_rl_mimic_env.py | 16 + .../isaaclab/isaaclab/envs/mimic_env_cfg.py | 9 + .../isaaclab_mimic/datagen/data_generator.py | 422 +++- .../isaaclab_mimic/datagen/datagen_info.py | 18 + .../datagen/datagen_info_pool.py | 123 +- .../isaaclab_mimic/datagen/generation.py | 21 +- .../isaaclab_mimic/envs/__init__.py | 20 + .../franka_bin_stack_ik_rel_mimic_env_cfg.py | 93 + .../envs/franka_stack_ik_rel_mimic_env.py | 23 + .../franka_stack_ik_rel_skillgen_env_cfg.py | 135 ++ .../motion_planners/base_motion_planner.py | 130 ++ .../motion_planners/curobo/curobo_planner.py | 1948 +++++++++++++++++ .../curobo/curobo_planner_config.py | 285 +++ .../motion_planners/curobo/plan_visualizer.py | 968 ++++++++ .../test/test_curobo_planner_cube_stack.py | 248 +++ .../curobo/test/test_curobo_planner_franka.py | 173 ++ .../stack/config/franka/__init__.py | 21 + .../config/franka/bin_stack_ik_rel_env_cfg.py | 35 + .../franka/bin_stack_joint_pos_env_cfg.py | 203 ++ .../config/franka/stack_joint_pos_env_cfg.py | 13 +- 23 files changed, 4894 insertions(+), 171 deletions(-) create mode 100644 source/isaaclab_mimic/isaaclab_mimic/envs/franka_bin_stack_ik_rel_mimic_env_cfg.py create mode 100644 source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_skillgen_env_cfg.py create mode 100644 source/isaaclab_mimic/isaaclab_mimic/motion_planners/base_motion_planner.py create mode 100644 source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py create mode 100644 source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py create mode 100644 source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py create mode 100644 source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_cube_stack.py create mode 100644 source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_franka.py create mode 100644 source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_ik_rel_env_cfg.py create mode 100644 source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index bde83712b64..e4e76f68944 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -99,6 +99,7 @@ Guidelines for modifications: * Miguel Alonso Jr * Mingyu Lee * Muhong Guo +* Neel Anand Jawale * Nicola Loi * Norbert Cygiert * Nuoyan Chen (Alvin) diff --git a/scripts/imitation_learning/isaaclab_mimic/annotate_demos.py b/scripts/imitation_learning/isaaclab_mimic/annotate_demos.py index 69f975ba858..4588f03fbc8 100644 --- a/scripts/imitation_learning/isaaclab_mimic/annotate_demos.py +++ b/scripts/imitation_learning/isaaclab_mimic/annotate_demos.py @@ -8,6 +8,7 @@ """ import argparse +import math from isaaclab.app import AppLauncher @@ -33,6 +34,12 @@ default=False, help="Enable Pinocchio.", ) +parser.add_argument( + "--annotate_subtask_start_signals", + action="store_true", + default=False, + help="Enable annotating start points of subtasks.", +) # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) @@ -123,6 +130,20 @@ class PreStepDatagenInfoRecorderCfg(RecorderTermCfg): class_type: 
type[RecorderTerm] = PreStepDatagenInfoRecorder +class PreStepSubtaskStartsObservationsRecorder(RecorderTerm): + """Recorder term that records the subtask start observations in each step.""" + + def record_pre_step(self): + return "obs/datagen_info/subtask_start_signals", self._env.get_subtask_start_signals() + + +@configclass +class PreStepSubtaskStartsObservationsRecorderCfg(RecorderTermCfg): + """Configuration for the subtask start observations recorder term.""" + + class_type: type[RecorderTerm] = PreStepSubtaskStartsObservationsRecorder + + class PreStepSubtaskTermsObservationsRecorder(RecorderTerm): """Recorder term that records the subtask completion observations in each step.""" @@ -142,6 +163,7 @@ class MimicRecorderManagerCfg(ActionStateRecorderManagerCfg): """Mimic specific recorder terms.""" record_pre_step_datagen_info = PreStepDatagenInfoRecorderCfg() + record_pre_step_subtask_start_signals = PreStepSubtaskStartsObservationsRecorderCfg() record_pre_step_subtask_term_signals = PreStepSubtaskTermsObservationsRecorderCfg() @@ -189,11 +211,15 @@ def main(): env_cfg.terminations = None # Set up recorder terms for mimic annotations - env_cfg.recorders: MimicRecorderManagerCfg = MimicRecorderManagerCfg() + env_cfg.recorders = MimicRecorderManagerCfg() if not args_cli.auto: # disable subtask term signals recorder term if in manual mode env_cfg.recorders.record_pre_step_subtask_term_signals = None + if not args_cli.auto or not args_cli.annotate_subtask_start_signals: + # disable subtask start signals recorder term in manual mode or when start annotations are not requested + env_cfg.recorders.record_pre_step_subtask_start_signals = None + env_cfg.recorders.dataset_export_dir_path = output_dir env_cfg.recorders.dataset_filename = output_file_name @@ -210,13 +236,36 @@ def main(): "The environment does not implement the get_subtask_term_signals method required " "to run automatic annotations." ) + if ( + args_cli.annotate_subtask_start_signals + and env.get_subtask_start_signals.__func__ is ManagerBasedRLMimicEnv.get_subtask_start_signals + ): + raise NotImplementedError( + "The environment does not implement the get_subtask_start_signals method required " + "to run automatic annotations." + ) else: # get subtask termination signal names for each eef from the environment configs subtask_term_signal_names = {} + subtask_start_signal_names = {} for eef_name, eef_subtask_configs in env.cfg.subtask_configs.items(): + subtask_start_signal_names[eef_name] = ( + [subtask_config.subtask_term_signal for subtask_config in eef_subtask_configs] + if args_cli.annotate_subtask_start_signals + else [] + ) subtask_term_signal_names[eef_name] = [ subtask_config.subtask_term_signal for subtask_config in eef_subtask_configs ] + # Validation: if annotating start signals, every subtask (including the last) must have a name + if args_cli.annotate_subtask_start_signals: + if any(name in (None, "") for name in subtask_start_signal_names[eef_name]): + raise ValueError( + f"Missing 'subtask_term_signal' for one or more subtasks in eef '{eef_name}'. When" + " '--annotate_subtask_start_signals' is enabled, each subtask (including the last) must" + " specify 'subtask_term_signal'. The last subtask's term signal name is used as the final" + " start signal name."
+ ) # no need to annotate the last subtask term signal, so remove it from the list subtask_term_signal_names[eef_name].pop() @@ -250,7 +299,7 @@ def main(): is_episode_annotated_successfully = annotate_episode_in_auto_mode(env, episode, success_term) else: is_episode_annotated_successfully = annotate_episode_in_manual_mode( - env, episode, success_term, subtask_term_signal_names + env, episode, success_term, subtask_term_signal_names, subtask_start_signal_names ) if is_episode_annotated_successfully and not skip_episode: @@ -361,6 +410,12 @@ def annotate_episode_in_auto_mode( if not torch.any(signal_flags): is_episode_annotated_successfully = False print(f'\tDid not detect completion for the subtask "{signal_name}".') + if args_cli.annotate_subtask_start_signals: + subtask_start_signal_dict = annotated_episode.data["obs"]["datagen_info"]["subtask_start_signals"] + for signal_name, signal_flags in subtask_start_signal_dict.items(): + if not torch.any(signal_flags): + is_episode_annotated_successfully = False + print(f'\tDid not detect start for the subtask "{signal_name}".') return is_episode_annotated_successfully @@ -369,6 +424,7 @@ def annotate_episode_in_manual_mode( episode: EpisodeData, success_term: TerminationTermCfg | None = None, subtask_term_signal_names: dict[str, list[str]] = {}, + subtask_start_signal_names: dict[str, list[str]] = {}, ) -> bool: """Annotates an episode in manual mode. @@ -380,16 +436,18 @@ def annotate_episode_in_manual_mode( episode: The recorded episode data to replay. success_term: Optional termination term to check for task success. subtask_term_signal_names: Dictionary mapping eef names to lists of subtask term signal names. - + subtask_start_signal_names: Dictionary mapping eef names to lists of subtask start signal names. Returns: True if the episode was successfully annotated, False otherwise. 
""" global is_paused, marked_subtask_action_indices, skip_episode # iterate over the eefs for marking subtask term signals subtask_term_signal_action_indices = {} + subtask_start_signal_action_indices = {} for eef_name, eef_subtask_term_signal_names in subtask_term_signal_names.items(): + eef_subtask_start_signal_names = subtask_start_signal_names[eef_name] # skip if no subtask annotation is needed for this eef - if len(eef_subtask_term_signal_names) == 0: + if len(eef_subtask_term_signal_names) == 0 and len(eef_subtask_start_signal_names) == 0: continue while True: @@ -397,6 +455,8 @@ def annotate_episode_in_manual_mode( skip_episode = False print(f'\tPlaying the episode for subtask annotations for eef "{eef_name}".') print("\tSubtask signals to annotate:") + if len(eef_subtask_start_signal_names) > 0: + print(f"\t\t- Start:\t{eef_subtask_start_signal_names}") print(f"\t\t- Termination:\t{eef_subtask_term_signal_names}") print('\n\tPress "N" to begin.') @@ -410,14 +470,24 @@ def annotate_episode_in_manual_mode( return False print(f"\tSubtasks marked at action indices: {marked_subtask_action_indices}") - expected_subtask_signal_count = len(eef_subtask_term_signal_names) + expected_subtask_signal_count = len(eef_subtask_term_signal_names) + len(eef_subtask_start_signal_names) if task_success_result and expected_subtask_signal_count == len(marked_subtask_action_indices): print(f'\tAll {expected_subtask_signal_count} subtask signals for eef "{eef_name}" were annotated.') for marked_signal_index in range(expected_subtask_signal_count): - # collect subtask term signal action indices - subtask_term_signal_action_indices[eef_subtask_term_signal_names[marked_signal_index]] = ( - marked_subtask_action_indices[marked_signal_index] - ) + if args_cli.annotate_subtask_start_signals and marked_signal_index % 2 == 0: + subtask_start_signal_action_indices[ + eef_subtask_start_signal_names[int(marked_signal_index / 2)] + ] = marked_subtask_action_indices[marked_signal_index] + if not args_cli.annotate_subtask_start_signals: + # Direct mapping when only collecting termination signals + subtask_term_signal_action_indices[eef_subtask_term_signal_names[marked_signal_index]] = ( + marked_subtask_action_indices[marked_signal_index] + ) + elif args_cli.annotate_subtask_start_signals and marked_signal_index % 2 == 1: + # Every other signal is a termination when collecting both types + subtask_term_signal_action_indices[ + eef_subtask_term_signal_names[math.floor(marked_signal_index / 2)] + ] = marked_subtask_action_indices[marked_signal_index] break if not task_success_result: @@ -442,6 +512,18 @@ def annotate_episode_in_manual_mode( subtask_signals = torch.ones(len(episode.data["actions"]), dtype=torch.bool) subtask_signals[:subtask_term_signal_action_index] = False annotated_episode.add(f"obs/datagen_info/subtask_term_signals/{subtask_term_signal_name}", subtask_signals) + + if args_cli.annotate_subtask_start_signals: + for ( + subtask_start_signal_name, + subtask_start_signal_action_index, + ) in subtask_start_signal_action_indices.items(): + subtask_signals = torch.ones(len(episode.data["actions"]), dtype=torch.bool) + subtask_signals[:subtask_start_signal_action_index] = False + annotated_episode.add( + f"obs/datagen_info/subtask_start_signals/{subtask_start_signal_name}", subtask_signals + ) + return True diff --git a/scripts/imitation_learning/isaaclab_mimic/generate_dataset.py b/scripts/imitation_learning/isaaclab_mimic/generate_dataset.py index 4ab8b309c26..d1f4d065e35 100644 --- 
a/scripts/imitation_learning/isaaclab_mimic/generate_dataset.py +++ b/scripts/imitation_learning/isaaclab_mimic/generate_dataset.py @@ -11,6 +11,7 @@ """Launch Isaac Sim Simulator first.""" import argparse +import os from isaaclab.app import AppLauncher @@ -39,6 +40,12 @@ default=False, help="Enable Pinocchio.", ) +parser.add_argument( + "--use_skillgen", + action="store_true", + default=False, + help="Use SkillGen to generate motion trajectories.", +) # append AppLauncher cli args AppLauncher.add_app_launcher_args(parser) # parse the arguments @@ -86,6 +93,12 @@ def main(): task_name = args_cli.task.split(":")[-1] env_name = task_name or get_env_name_from_dataset(args_cli.input_file) + # Export a flag for cfg-time decisions (env cfg __post_init__) + if args_cli.use_skillgen: + os.environ["ISAACLAB_USE_SKILLGEN"] = "1" + else: + os.environ.pop("ISAACLAB_USE_SKILLGEN", None) + # Configure environment env_cfg, success_term = setup_env_config( env_name=env_name, @@ -96,27 +109,55 @@ def main(): generation_num_trials=args_cli.generation_num_trials, ) - # create environment + # Create environment env = gym.make(env_name, cfg=env_cfg).unwrapped if not isinstance(env, ManagerBasedRLMimicEnv): raise ValueError("The environment should be derived from ManagerBasedRLMimicEnv") - # check if the mimic API from this environment contains decprecated signatures + # Check if the mimic API from this environment contains deprecated signatures if "action_noise_dict" not in inspect.signature(env.target_eef_pose_to_action).parameters: omni.log.warn( f'The "noise" parameter in the "{env_name}" environment\'s mimic API "target_eef_pose_to_action", ' "is deprecated. Please update the API to take action_noise_dict instead." ) - # set seed for generation + # Set seed for generation random.seed(env.cfg.datagen_config.seed) np.random.seed(env.cfg.datagen_config.seed) torch.manual_seed(env.cfg.datagen_config.seed) - # reset before starting + # Reset before starting env.reset() + motion_planners = None + if args_cli.use_skillgen: + from isaaclab_mimic.motion_planners.curobo.curobo_planner import CuroboPlanner + from isaaclab_mimic.motion_planners.curobo.curobo_planner_config import CuroboPlannerConfig + + # Create one motion planner per environment + motion_planners = {} + for env_id in range(num_envs): + print(f"Initializing motion planner for environment {env_id}") + # Create a config instance from the task name + planner_config = CuroboPlannerConfig.from_task_name(env_name) + + # Enable visualization only for the first environment; otherwise, sphere and plan + # visualization becomes too slow in Isaac Lab. Visualizing the spheres and plan for + # just the first environment in rerun is sufficient and efficient. + if env_id != 0: + planner_config.visualize_spheres = False + planner_config.visualize_plan = False + + motion_planners[env_id] = CuroboPlanner( + env=env, + robot=env.scene["robot"], + config=planner_config, # Pass the config object + env_id=env_id, # Pass environment ID + ) + + env.cfg.datagen_config.use_skillgen = True + # Setup and run async data generation async_components = setup_async_generation( env=env, @@ -124,6 +165,7 @@ def main(): input_file=args_cli.input_file, success_term=success_term, pause_subtask=args_cli.pause_subtask, + motion_planners=motion_planners, # Pass the motion planners dictionary ) try: @@ -147,6 +189,14 @@ def main(): print("Remaining async tasks cancelled and cleaned up.") except Exception as e: print(f"Error cancelling remaining async tasks: {e}") + # Cleanup of motion planners and their
visualizers + if motion_planners is not None: + for env_id, planner in motion_planners.items(): + if hasattr(planner, "plan_visualizer") and planner.plan_visualizer is not None: + print(f"Closing plan visualizer for environment {env_id}") + planner.plan_visualizer.close() + planner.plan_visualizer = None + motion_planners.clear() if __name__ == "__main__": @@ -154,5 +204,5 @@ def main(): main() except KeyboardInterrupt: print("\nProgram interrupted by user. Exiting...") - # close sim app + # Close sim app simulation_app.close() diff --git a/source/isaaclab/isaaclab/envs/manager_based_rl_mimic_env.py b/source/isaaclab/isaaclab/envs/manager_based_rl_mimic_env.py index 07a33f416a7..781f89ccbeb 100644 --- a/source/isaaclab/isaaclab/envs/manager_based_rl_mimic_env.py +++ b/source/isaaclab/isaaclab/envs/manager_based_rl_mimic_env.py @@ -117,6 +117,22 @@ def get_object_poses(self, env_ids: Sequence[int] | None = None): ) return object_pose_matrix + def get_subtask_start_signals(self, env_ids: Sequence[int] | None = None) -> dict[str, torch.Tensor]: + """ + Gets a dictionary of start signal flags for each subtask in a task. The flag is 1 + when the subtask has started and 0 otherwise. Implementing this method is required + to enable automatic subtask start signal annotation when running the dataset + annotation tool. It can be left unimplemented if manual subtask start signal + annotation is intended. + + Args: + env_ids: Environment indices to get the start signals for. If None, all envs are considered. + + Returns: + A dictionary of start signal flags (False or True) for each subtask. + """ + raise NotImplementedError + def get_subtask_term_signals(self, env_ids: Sequence[int] | None = None) -> dict[str, torch.Tensor]: """ Gets a dictionary of termination signal flags for each subtask in a task.
The flag is 1 diff --git a/source/isaaclab/isaaclab/envs/mimic_env_cfg.py b/source/isaaclab/isaaclab/envs/mimic_env_cfg.py index ecd2b4fdb2e..53b48de13e1 100644 --- a/source/isaaclab/isaaclab/envs/mimic_env_cfg.py +++ b/source/isaaclab/isaaclab/envs/mimic_env_cfg.py @@ -73,6 +73,9 @@ class DataGenConfig: generation_interpolate_from_last_target_pose: bool = True """Whether to interpolate from last target pose.""" + use_skillgen: bool = False + """Whether to use skillgen to generate motion trajectories.""" + @configclass class SubTaskConfig: @@ -115,6 +118,12 @@ class SubTaskConfig: first_subtask_start_offset_range: tuple = (0, 0) """Range for start offset of the first subtask.""" + subtask_start_offset_range: tuple = (0, 0) + """Range for start offset of the subtask (only used if use_skillgen is True) + + Note: This value overrides the first_subtask_start_offset_range when skillgen is enabled + """ + subtask_term_offset_range: tuple = (0, 0) """Range for offsetting subtask termination.""" diff --git a/source/isaaclab_mimic/isaaclab_mimic/datagen/data_generator.py b/source/isaaclab_mimic/isaaclab_mimic/datagen/data_generator.py index 8e4d1c285d4..35e50de9eeb 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/datagen/data_generator.py +++ b/source/isaaclab_mimic/isaaclab_mimic/datagen/data_generator.py @@ -9,6 +9,7 @@ import asyncio import numpy as np import torch +from typing import Any import isaaclab.utils.math as PoseUtils from isaaclab.envs import ( @@ -69,13 +70,13 @@ def transform_source_data_segment_using_object_pose( transformed_eef_poses: transformed pose sequence (shape [T, 4, 4]) """ - # transform source end effector poses to be relative to source object frame + # Transform source end effector poses to be relative to source object frame src_eef_poses_rel_obj = PoseUtils.pose_in_A_to_pose_in_B( pose_in_A=src_eef_poses, pose_A_in_B=PoseUtils.pose_inv(src_obj_pose[None]), ) - # apply relative poses to current object frame to obtain new target eef poses + # Apply relative poses to current object frame to obtain new target eef poses transformed_eef_poses = PoseUtils.pose_in_A_to_pose_in_B( pose_in_A=src_eef_poses_rel_obj, pose_A_in_B=obj_pose[None], @@ -159,7 +160,7 @@ def __init__( assert isinstance(self.env_cfg, MimicEnvCfg) self.dataset_path = dataset_path - # sanity check on task spec offset ranges - final subtask should not have any offset randomization + # Sanity check on task spec offset ranges - final subtask should not have any offset randomization for subtask_configs in self.env_cfg.subtask_configs.values(): assert subtask_configs[-1].subtask_term_offset_range[0] == 0 assert subtask_configs[-1].subtask_term_offset_range[1] == 0 @@ -191,13 +192,13 @@ def randomize_subtask_boundaries(self) -> dict[str, np.ndarray]: """ Apply random offsets to sample subtask boundaries according to the task spec. Recall that each demonstration is segmented into a set of subtask segments, and the - end index of each subtask can have a random offset. + end index (and start index when skillgen is enabled) of each subtask can have a random offset. 
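+ For example (illustrative numbers): a demo boundary pair (12, 57) with subtask_term_offset_range=(0, 3) + may be randomized to (12, 59); with skillgen enabled, the start index 12 can additionally shift by a + sample from subtask_start_offset_range.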
""" randomized_subtask_boundaries = {} for eef_name, subtask_boundaries in self.src_demo_datagen_info_pool.subtask_boundaries.items(): - # initial subtask start and end indices - shape (N, S, 2) + # Initial subtask start and end indices - shape (N, S, 2) subtask_boundaries = np.array(subtask_boundaries) # Randomize the start of the first subtask @@ -208,27 +209,38 @@ def randomize_subtask_boundaries(self) -> dict[str, np.ndarray]: ) subtask_boundaries[:, 0, 0] += first_subtask_start_offsets - # for each subtask (except last one), sample all end offsets at once for each demonstration - # add them to subtask end indices, and then set them as the start indices of next subtask too - for i in range(subtask_boundaries.shape[1] - 1): + # For each subtask, sample all end offsets at once for each demonstration + # Add them to subtask end indices, and then set them as the start indices of next subtask too + for i in range(subtask_boundaries.shape[1]): + # If skillgen is enabled, sample a random start offset to increase demonstration variety. + if self.env_cfg.datagen_config.use_skillgen: + start_offset = np.random.randint( + low=self.env_cfg.subtask_configs[eef_name][i].subtask_start_offset_range[0], + high=self.env_cfg.subtask_configs[eef_name][i].subtask_start_offset_range[1] + 1, + size=subtask_boundaries.shape[0], + ) + subtask_boundaries[:, i, 0] += start_offset + elif i > 0: + # Without skillgen, the start of a subtask is the end of the previous one. + subtask_boundaries[:, i, 0] = subtask_boundaries[:, i - 1, 1] + + # Sample end offset for each demonstration end_offsets = np.random.randint( low=self.env_cfg.subtask_configs[eef_name][i].subtask_term_offset_range[0], high=self.env_cfg.subtask_configs[eef_name][i].subtask_term_offset_range[1] + 1, size=subtask_boundaries.shape[0], ) subtask_boundaries[:, i, 1] = subtask_boundaries[:, i, 1] + end_offsets - # don't forget to set these as start indices for next subtask too - subtask_boundaries[:, i + 1, 0] = subtask_boundaries[:, i, 1] - # ensure non-empty subtasks + # Ensure non-empty subtasks assert np.all((subtask_boundaries[:, :, 1] - subtask_boundaries[:, :, 0]) > 0), "got empty subtasks!" - # ensure subtask indices increase (both starts and ends) + # Ensure subtask indices increase (both starts and ends) assert np.all( (subtask_boundaries[:, 1:, :] - subtask_boundaries[:, :-1, :]) > 0 ), "subtask indices do not strictly increase" - # ensure subtasks are in order + # Ensure subtasks are in order subtask_inds_flat = subtask_boundaries.reshape(subtask_boundaries.shape[0], -1) assert np.all((subtask_inds_flat[:, 1:] - subtask_inds_flat[:, :-1]) >= 0), "subtask indices not in order" @@ -269,18 +281,18 @@ def select_source_demo( # demo, so that it can be used by the selection strategy. 
src_subtask_datagen_infos = [] for i in range(len(self.src_demo_datagen_info_pool.datagen_infos)): - # datagen info over all timesteps of the src trajectory + # Datagen info over all timesteps of the src trajectory src_ep_datagen_info = self.src_demo_datagen_info_pool.datagen_infos[i] - # time indices for subtask + # Time indices for subtask subtask_start_ind = src_demo_current_subtask_boundaries[i][0] subtask_end_ind = src_demo_current_subtask_boundaries[i][1] - # get subtask segment using indices + # Get subtask segment using indices src_subtask_datagen_infos.append( DatagenInfo( eef_pose=src_ep_datagen_info.eef_pose[eef_name][subtask_start_ind:subtask_end_ind], - # only include object pose for relevant object in subtask + # Only include object pose for relevant object in subtask object_poses=( { subtask_object_name: src_ep_datagen_info.object_poses[subtask_object_name][ @@ -290,17 +302,17 @@ def select_source_demo( if (subtask_object_name is not None) else None ), - # subtask termination signal is unused + # Subtask termination signal is unused subtask_term_signals=None, target_eef_pose=src_ep_datagen_info.target_eef_pose[eef_name][subtask_start_ind:subtask_end_ind], gripper_action=src_ep_datagen_info.gripper_action[eef_name][subtask_start_ind:subtask_end_ind], ) ) - # make selection strategy object + # Make selection strategy object selection_strategy_obj = make_selection_strategy(selection_strategy_name) - # run selection + # Run selection if selection_strategy_kwargs is None: selection_strategy_kwargs = dict() selected_src_demo_ind = selection_strategy_obj.select_source_demo( @@ -312,30 +324,46 @@ def select_source_demo( return selected_src_demo_ind - def generate_trajectory( + def generate_eef_subtask_trajectory( self, env_id: int, eef_name: str, subtask_ind: int, - all_randomized_subtask_boundaries: dict[str, np.ndarray], - runtime_subtask_constraints_dict: dict[tuple[str, int], dict], - selected_src_demo_inds: dict[str, int | None], - prev_executed_traj: dict[str, list[Waypoint] | None], - ) -> list[Waypoint]: + all_randomized_subtask_boundaries: dict, + runtime_subtask_constraints_dict: dict, + selected_src_demo_inds: dict, + ) -> WaypointTrajectory: """ - Generate a trajectory for the given subtask. + Build a transformed waypoint trajectory for a single subtask of an end-effector. + + This method selects a source demonstration segment for the specified subtask, + slices the corresponding EEF poses/targets/gripper actions using the randomized + subtask boundaries, optionally prepends the first robot EEF pose (to interpolate + from the robot pose instead of the first target), applies an object/coordination + based transform to the pose sequence, and returns the result as a `WaypointTrajectory`. + + Selection and transforms: + - Source demo selection is controlled by `SubTaskConfig.selection_strategy` (and kwargs) and by + `datagen_config.generation_select_src_per_subtask` / `generation_select_src_per_arm`. + - For coordination constraints, the method reuses/sets the selected source demo ID across + concurrent subtasks, computes `synchronous_steps`, and stores the pose `transform` used + to ensure consistent relative motion between tasks. + - Pose transforms are computed either from object poses (`object_ref`) or via a delta pose + provided by a concurrent task/coordination scheme. 
Args: - env_id: environment index - eef_name: name of end effector - subtask_ind: index of subtask - all_randomized_subtask_boundaries: randomized subtask boundaries - runtime_subtask_constraints_dict: runtime subtask constraints - selected_src_demo_inds: dictionary of selected source demo indices per eef, updated in place - prev_executed_traj: dictionary of previously executed eef trajectories + env_id: Environment index used to query current robot/object poses. + eef_name: End-effector key whose subtask trajectory is being generated. + subtask_ind: Index of the subtask within `subtask_configs[eef_name]`. + all_randomized_subtask_boundaries: For each EEF, an array of per-demo + randomized (start, end) indices for every subtask. + runtime_subtask_constraints_dict: In/out dictionary carrying runtime fields + for constraints (e.g., selected source ID, delta transform, synchronous steps). + selected_src_demo_inds: Per-EEF mapping for the currently selected source demo index + (may be reused across arms if configured). Returns: - trajectory: generated trajectory + WaypointTrajectory: The transformed trajectory for the selected subtask segment. """ subtask_configs = self.env_cfg.subtask_configs[eef_name] # name of object for this subtask @@ -357,7 +385,7 @@ coord_transform_scheme = None if (eef_name, subtask_ind) in runtime_subtask_constraints_dict: if runtime_subtask_constraints_dict[(eef_name, subtask_ind)]["type"] == SubTaskConstraintType.COORDINATION: - # avoid selecting source demo if it has already been selected by the concurrent task + # Avoid selecting source demo if it has already been selected by the concurrent task concurrent_task_spec_key = runtime_subtask_constraints_dict[(eef_name, subtask_ind)][ "concurrent_task_spec_key" ] @@ -368,9 +396,10 @@ (concurrent_task_spec_key, concurrent_subtask_ind) ]["selected_src_demo_ind"] if concurrent_selected_src_ind is not None: - # the concurrent task has started, so we should use the same source demo + # The concurrent task has started, so we should use the same source demo selected_src_demo_inds[eef_name] = concurrent_selected_src_ind need_source_demo_selection = False + # This transform is set after the first data generation iteration (the first run of the main while loop) use_delta_transform = runtime_subtask_constraints_dict[ (concurrent_task_spec_key, concurrent_subtask_ind) ]["transform"] @@ -378,7 +407,7 @@ assert ( "transform" not in runtime_subtask_constraints_dict[(eef_name, subtask_ind)] ), "transform should not be set for concurrent task" - # need to transform demo according to scheme + # Need to transform demo according to scheme coord_transform_scheme = runtime_subtask_constraints_dict[(eef_name, subtask_ind)][ "coordination_scheme" ] @@ -405,12 +434,12 @@ for itrated_eef_name in self.env_cfg.subtask_configs.keys(): selected_src_demo_inds[itrated_eef_name] = selected_src_demo_ind - # selected subtask segment time indices + # Selected subtask segment time indices selected_src_subtask_boundary = all_randomized_subtask_boundaries[eef_name][selected_src_demo_ind, subtask_ind] if (eef_name, subtask_ind) in runtime_subtask_constraints_dict: if runtime_subtask_constraints_dict[(eef_name, subtask_ind)]["type"] == SubTaskConstraintType.COORDINATION: - # store selected source demo ind for concurrent task + # Store selected source demo ind for concurrent task runtime_subtask_constraints_dict[(eef_name, subtask_ind)][ "selected_src_demo_ind" ] =
selected_src_demo_ind @@ -429,9 +458,7 @@ def generate_trajectory( subtask_len, concurrent_subtask_len ) - # TODO allow for different anchor selection strategies for each subtask - - # get subtask segment, consisting of the sequence of robot eef poses, target poses, gripper actions + # Get subtask segment, consisting of the sequence of robot eef poses, target poses, gripper actions src_ep_datagen_info = self.src_demo_datagen_info_pool.datagen_infos[selected_src_demo_ind] src_subtask_eef_poses = src_ep_datagen_info.eef_pose[eef_name][ selected_src_subtask_boundary[0] : selected_src_subtask_boundary[1] @@ -443,7 +470,7 @@ def generate_trajectory( selected_src_subtask_boundary[0] : selected_src_subtask_boundary[1] ] - # get reference object pose from source demo + # Get reference object pose from source demo src_subtask_object_pose = ( src_ep_datagen_info.object_poses[subtask_object_name][selected_src_subtask_boundary[0]] if (subtask_object_name is not None) @@ -452,10 +479,10 @@ def generate_trajectory( if is_first_subtask or self.env_cfg.datagen_config.generation_transform_first_robot_pose: # Source segment consists of first robot eef pose and the target poses. This ensures that - # we will interpolate to the first robot eef pose in this source segment, instead of the + # We will interpolate to the first robot eef pose in this source segment, instead of the # first robot target pose. src_eef_poses = torch.cat([src_subtask_eef_poses[0:1], src_subtask_target_poses], dim=0) - # account for extra timestep added to @src_eef_poses + # Account for extra timestep added to @src_eef_poses src_subtask_gripper_actions = torch.cat( [src_subtask_gripper_actions[0:1], src_subtask_gripper_actions], dim=0 ) @@ -466,19 +493,11 @@ def generate_trajectory( # Transform source demonstration segment using relevant object pose. if use_delta_transform is not None: - # use delta transform from concurrent task + # Use delta transform from concurrent task transformed_eef_poses = transform_source_data_segment_using_delta_object_pose( src_eef_poses, use_delta_transform ) - # TODO: Uncomment below to support case of temporal concurrent but NOT does not require coordination. Need to decide if this case is necessary - # if subtask_object_name is not None: - # transformed_eef_poses = PoseUtils.transform_source_data_segment_using_object_pose( - # cur_object_poses[task_spec_idx], - # src_eef_poses, - # src_subtask_object_pose, - # ) - else: if coord_transform_scheme is not None: delta_obj_pose = get_delta_pose_with_scheme( @@ -499,45 +518,89 @@ def generate_trajectory( ) else: print(f"skipping transformation for {subtask_object_name}") - # skip transformation if no reference object is provided + + # Skip transformation if no reference object is provided transformed_eef_poses = src_eef_poses + # Construct trajectory for the transformed segment. + transformed_seq = WaypointSequence.from_poses( + poses=transformed_eef_poses, + gripper_actions=src_subtask_gripper_actions, + action_noise=subtask_configs[subtask_ind].action_noise, + ) + transformed_traj = WaypointTrajectory() + transformed_traj.add_waypoint_sequence(transformed_seq) + + return transformed_traj + + def merge_eef_subtask_trajectory( + self, + env_id: int, + eef_name: str, + subtask_index: int, + prev_executed_traj: list[Waypoint] | None, + subtask_trajectory: WaypointTrajectory, + ) -> list[Waypoint]: + """ + Merge a subtask trajectory into an executable trajectory for the robot end-effector. 
+ + This constructs a new `WaypointTrajectory` by first creating an initial + interpolation segment, then merging the provided `subtask_trajectory` onto it. + The initial segment begins either from the last executed target waypoint of the + previous subtask (if configured) or from the robot's current end-effector pose. + + Behavior: + - If `datagen_config.generation_interpolate_from_last_target_pose` is True and + this is not the first subtask, interpolation starts from the last waypoint of + `prev_executed_traj`. + - Otherwise, interpolation starts from the current robot EEF pose (queried from the env) + and uses the first waypoint's gripper action and the subtask's action noise. + - The merge uses `num_interpolation_steps`, `num_fixed_steps`, and optionally + `apply_noise_during_interpolation` from the corresponding `SubTaskConfig`. + - The temporary initial waypoint used to enable interpolation is removed before returning. + + Args: + env_id: Environment index to query the current robot EEF pose when needed. + eef_name: Name/key of the end-effector whose trajectory is being merged. + subtask_index: Index of the subtask within `subtask_configs[eef_name]` driving interpolation parameters. + prev_executed_traj: The previously executed trajectory used to + seed interpolation from its last target waypoint. Required when interpolation-from-last-target + is enabled and this is not the first subtask. + subtask_trajectory: Trajectory segment for the current subtask that will be merged after the + initial interpolation segment. + + Returns: + list[Waypoint]: The full sequence of waypoints to execute (initial interpolation segment followed by the subtask segment), + with the temporary initial waypoint removed. + """ + is_first_subtask = subtask_index == 0 # We will construct a WaypointTrajectory instance to keep track of robot control targets - # that will be executed and then execute it. + # and then execute it once we have the trajectory. traj_to_execute = WaypointTrajectory() if self.env_cfg.datagen_config.generation_interpolate_from_last_target_pose and (not is_first_subtask): # Interpolation segment will start from last target pose (which may not have been achieved). - assert prev_executed_traj[eef_name] is not None - last_waypoint = prev_executed_traj[eef_name][-1] + assert prev_executed_traj is not None + last_waypoint = prev_executed_traj[-1] init_sequence = WaypointSequence(sequence=[last_waypoint]) else: # Interpolation segment will start from current robot eef pose. init_sequence = WaypointSequence.from_poses( poses=self.env.get_robot_eef_pose(env_ids=[env_id], eef_name=eef_name)[0].unsqueeze(0), - gripper_actions=src_subtask_gripper_actions[0].unsqueeze(0), - action_noise=subtask_configs[subtask_ind].action_noise, + gripper_actions=subtask_trajectory[0].gripper_action.unsqueeze(0), + action_noise=self.env_cfg.subtask_configs[eef_name][subtask_index].action_noise, ) traj_to_execute.add_waypoint_sequence(init_sequence) - # Construct trajectory for the transformed segment. - transformed_seq = WaypointSequence.from_poses( - poses=transformed_eef_poses, - gripper_actions=src_subtask_gripper_actions, - action_noise=subtask_configs[subtask_ind].action_noise, - ) - transformed_traj = WaypointTrajectory() - transformed_traj.add_waypoint_sequence(transformed_seq) - # Merge this trajectory into our trajectory using linear interpolation. - # Interpolation will happen from the initial pose (@init_sequence) to the first element of @transformed_seq. + # Interpolation will happen from the initial pose (@init_sequence) to the first waypoint of @subtask_trajectory.
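+ # E.g., num_interpolation_steps sets how many linearly interpolated waypoints are inserted between + # the initial pose and the first subtask waypoint, while num_fixed_steps holds a pose fixed for extra + # steps (a rough sketch of the WaypointTrajectory.merge semantics).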
traj_to_execute.merge( - transformed_traj, - num_steps_interp=self.env_cfg.subtask_configs[eef_name][subtask_ind].num_interpolation_steps, - num_steps_fixed=self.env_cfg.subtask_configs[eef_name][subtask_ind].num_fixed_steps, + subtask_trajectory, + num_steps_interp=self.env_cfg.subtask_configs[eef_name][subtask_index].num_interpolation_steps, + num_steps_fixed=self.env_cfg.subtask_configs[eef_name][subtask_index].num_fixed_steps, action_noise=( - float(self.env_cfg.subtask_configs[eef_name][subtask_ind].apply_noise_during_interpolation) - * self.env_cfg.subtask_configs[eef_name][subtask_ind].action_noise + float(self.env_cfg.subtask_configs[eef_name][subtask_index].apply_noise_during_interpolation) + * self.env_cfg.subtask_configs[eef_name][subtask_index].action_noise ), ) @@ -549,7 +612,7 @@ # Return the generated trajectory return traj_to_execute.get_full_sequence().sequence - async def generate( + async def generate(  # noqa: C901 self, env_id: int, success_term: TerminationTermCfg, @@ -557,20 +620,22 @@ env_action_queue: asyncio.Queue | None = None, pause_subtask: bool = False, export_demo: bool = True, + motion_planner: Any | None = None, ) -> dict: """ Attempt to generate a new demonstration. Args: - env_id: environment index + env_id: environment ID success_term: success function to check if the task is successful env_reset_queue: queue to store environment IDs for reset env_action_queue: queue to store actions for each environment - pause_subtask: if True, pause after every subtask during generation, for debugging - export_demo: if True, export the generated demonstration + pause_subtask: if True, pause after every subtask during generation (for debugging) + export_demo: if True, export the generated demonstration + motion_planner: motion planner used to plan collision-free transitions between subtasks (required when use_skillgen is True) Returns: - results: dictionary with the following items: + results (dict): dictionary with the following items: initial_state (dict): initial simulator state for the executed trajectory states (list): simulator state at each timestep observations (list): observation dictionary at each timestep @@ -580,6 +645,9 @@ src_demo_inds (list): list of selected source demonstration indices for each subtask src_demo_labels (np.array): same as @src_demo_inds, but repeated to have a label for each timestep of the trajectory """ + # With skillgen, a motion planner is required to generate collision-free transitions between subtasks.
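+ # (In this patch, generate_dataset.py builds one CuroboPlanner per environment when --use_skillgen + # is set and passes it down through setup_async_generation / run_data_generator.)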
+ if self.env_cfg.datagen_config.use_skillgen and motion_planner is None: + raise ValueError("motion_planner must be provided if use_skillgen is True") # reset the env to create a new task demo instance env_id_tensor = torch.tensor([env_id], dtype=torch.int64, device=self.env.device) @@ -601,15 +669,18 @@ async def generate( # some eef-specific state variables used during generation current_eef_selected_src_demo_indices = {} - current_eef_subtask_trajectories = {} + current_eef_subtask_trajectories: dict[str, list[Waypoint]] = {} current_eef_subtask_indices = {} + next_eef_subtask_indices_after_motion = {} + next_eef_subtask_trajectories_after_motion = {} current_eef_subtask_step_indices = {} eef_subtasks_done = {} for eef_name in self.env_cfg.subtask_configs.keys(): current_eef_selected_src_demo_indices[eef_name] = None - # prev_eef_executed_traj[eef_name] = None # type of list of Waypoint - current_eef_subtask_trajectories[eef_name] = None # type of list of Waypoint + current_eef_subtask_trajectories[eef_name] = [] # type of list of Waypoint current_eef_subtask_indices[eef_name] = 0 + next_eef_subtask_indices_after_motion[eef_name] = None + next_eef_subtask_trajectories_after_motion[eef_name] = None current_eef_subtask_step_indices[eef_name] = None eef_subtasks_done[eef_name] = False @@ -619,35 +690,120 @@ async def generate( async with self.src_demo_datagen_info_pool.asyncio_lock: if len(self.src_demo_datagen_info_pool.datagen_infos) > prev_src_demo_datagen_info_pool_size: # src_demo_datagen_info_pool at this point may be updated with new demos, - # so we need to updaet subtask boundaries again + # So we need to update subtask boundaries again randomized_subtask_boundaries = ( self.randomize_subtask_boundaries() ) # shape [N, S, 2], last dim is start and end action lengths prev_src_demo_datagen_info_pool_size = len(self.src_demo_datagen_info_pool.datagen_infos) - # generate trajectory for a subtask for the eef that is currently at the beginning of a subtask + # Generate trajectory for a subtask for the eef that is currently at the beginning of a subtask for eef_name, eef_subtask_step_index in current_eef_subtask_step_indices.items(): if eef_subtask_step_index is None: - # current_eef_selected_src_demo_indices will be updated in generate_trajectory - subtask_trajectory = self.generate_trajectory( - env_id, - eef_name, - current_eef_subtask_indices[eef_name], - randomized_subtask_boundaries, - runtime_subtask_constraints_dict, - current_eef_selected_src_demo_indices, - current_eef_subtask_trajectories, - ) - current_eef_subtask_trajectories[eef_name] = subtask_trajectory - current_eef_subtask_step_indices[eef_name] = 0 - # current_eef_selected_src_demo_indices[eef_name] = selected_src_demo_inds - # two_arm_trajectories[task_spec_idx] = subtask_trajectory - # prev_executed_traj[task_spec_idx] = subtask_trajectory + # Trajectory stored in current_eef_subtask_trajectories[eef_name] has been executed, + # So we need to determine the next trajectory + # Note: This condition is the "resume-after-motion-plan" gate for skillgen. When + # use_skillgen=False (vanilla Mimic), next_eef_subtask_indices_after_motion[eef_name] + # remains None, so this condition is always True and the else-branch below is never taken. + # The else-branch is only used right after executing a motion-planned transition (skillgen) + # to resume the actual subtask trajectory. 
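+ # In short: None -> begin a new subtask (with skillgen, first plan and execute a transition to it); + # not None -> a planned transition just finished, so merge and resume the stored subtask trajectory.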
+ if next_eef_subtask_indices_after_motion[eef_name] is None: + # This is the beginning of a new subtask, so generate a new trajectory accordingly + eef_subtask_trajectory = self.generate_eef_subtask_trajectory( + env_id, + eef_name, + current_eef_subtask_indices[eef_name], + randomized_subtask_boundaries, + runtime_subtask_constraints_dict, + current_eef_selected_src_demo_indices, # updated in the method + ) + # With skillgen, use a motion planner to transition between subtasks. + if self.env_cfg.datagen_config.use_skillgen: + # Define the goal for the motion planner: the start of the next subtask. + target_eef_pose = eef_subtask_trajectory[0].pose + target_gripper_action = eef_subtask_trajectory[0].gripper_action + + # Determine expected object attachment using environment-specific logic (optional) + expected_attached_object = None + if hasattr(self.env, "get_expected_attached_object"): + expected_attached_object = self.env.get_expected_attached_object( + eef_name, current_eef_subtask_indices[eef_name], self.env.cfg + ) + + # Plan motion using motion planner with comprehensive world update and attachment handling + if motion_planner: + print(f"\n--- Environment {env_id}: Planning motion to target pose ---") + print(f"Target pose: {target_eef_pose}") + print(f"Expected attached object: {expected_attached_object}") + + # This call updates the planner's world model and computes the trajectory. + planning_success = motion_planner.update_world_and_plan_motion( + target_pose=target_eef_pose, + expected_attached_object=expected_attached_object, + env_id=env_id, + step_size=getattr(motion_planner, "step_size", None), + enable_retiming=hasattr(motion_planner, "step_size") + and motion_planner.step_size is not None, + ) + + # If planning succeeds, execute the planner's trajectory first. + if planning_success: + print(f"Env {env_id}: Motion planning succeeded") + # The original subtask trajectory is stored to be executed after the transition. + next_eef_subtask_trajectories_after_motion[eef_name] = eef_subtask_trajectory + next_eef_subtask_indices_after_motion[eef_name] = current_eef_subtask_indices[ + eef_name + ] + # Mark the current subtask as invalid (-1) until the transition is done. + current_eef_subtask_indices[eef_name] = -1 + + # Convert the planner's output into a sequence of waypoints to be executed. + current_eef_subtask_trajectories[eef_name] = ( + self._convert_planned_trajectory_to_waypoints( + motion_planner, target_gripper_action + ) + ) + current_eef_subtask_step_indices[eef_name] = 0 + print( + f"Generated {len(current_eef_subtask_trajectories[eef_name])} waypoints" + " from motion plan" + ) + + else: + # If planning fails, abort the data generation trial. + print(f"Env {env_id}: Motion planning failed for {eef_name}") + return {"success": False} + else: + # Without skillgen, transition using simple interpolation. 
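+ # (merge_eef_subtask_trajectory interpolates from the previous subtask's last target waypoint, + # or from the current robot eef pose when this is the first subtask.)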
+ current_eef_subtask_trajectories[eef_name] = self.merge_eef_subtask_trajectory( + env_id, + eef_name, + current_eef_subtask_indices[eef_name], + current_eef_subtask_trajectories[eef_name], + eef_subtask_trajectory, + ) + current_eef_subtask_step_indices[eef_name] = 0 + else: + # Motion-planned trajectory has been executed, so we are ready to move to execute the next subtask + print("Finished executing motion-planned trajectory") + # It is important to pass the prev_executed_traj to merge_eef_subtask_trajectory + # so that it can correctly interpolate from the last pose of the motion-planned trajectory + prev_executed_traj = current_eef_subtask_trajectories[eef_name] + current_eef_subtask_indices[eef_name] = next_eef_subtask_indices_after_motion[eef_name] + current_eef_subtask_trajectories[eef_name] = self.merge_eef_subtask_trajectory( + env_id, + eef_name, + current_eef_subtask_indices[eef_name], + prev_executed_traj, + next_eef_subtask_trajectories_after_motion[eef_name], + ) + current_eef_subtask_step_indices[eef_name] = 0 + next_eef_subtask_trajectories_after_motion[eef_name] = None + next_eef_subtask_indices_after_motion[eef_name] = None - # determine the next waypoint for each eef based on the current subtask constraints + # Determine the next waypoint for each eef based on the current subtask constraints eef_waypoint_dict = {} for eef_name in sorted(self.env_cfg.subtask_configs.keys()): - # handle constraints + # Handle constraints step_ind = current_eef_subtask_step_indices[eef_name] subtask_ind = current_eef_subtask_indices[eef_name] if (eef_name, subtask_ind) in runtime_subtask_constraints_dict: @@ -660,7 +816,7 @@ async def generate( or step_ind >= len(current_eef_subtask_trajectories[eef_name]) - min_time_diff ): if step_ind > 0: - # wait at the same step + # Wait at the same step step_ind -= 1 current_eef_subtask_step_indices[eef_name] = step_ind @@ -676,8 +832,8 @@ async def generate( task_constraint["coordination_synchronize_start"] and current_eef_subtask_indices[concurrent_task_spec_key] < concurrent_subtask_ind ): - # the concurrent eef is not yet at the concurrent subtask, so wait at the first action - # this also makes sure that the concurrent task starts at the same time as this task + # The concurrent eef is not yet at the concurrent subtask, so wait at the first action + # This also makes sure that the concurrent task starts at the same time as this task step_ind = 0 current_eef_subtask_step_indices[eef_name] = 0 else: @@ -685,7 +841,7 @@ async def generate( not concurrent_task_fulfilled and step_ind >= len(current_eef_subtask_trajectories[eef_name]) - synchronous_steps ): - # trigger concurrent task + # Trigger concurrent task runtime_subtask_constraints_dict[(concurrent_task_spec_key, concurrent_subtask_ind)][ "fulfilled" ] = True @@ -697,10 +853,20 @@ async def generate( current_eef_subtask_step_indices[eef_name] = step_ind # wait here waypoint = current_eef_subtask_trajectories[eef_name][step_ind] + + # Update visualization if motion planner is available + if motion_planner and motion_planner.visualize_spheres: + try: + # Get current joint positions for visualization + current_joints = self.env.scene["robot"].data.joint_pos[env_id] + motion_planner._update_visualization_at_joint_positions(current_joints) + except Exception as e: + print(f"Warning: Could not update sphere visualization: {e}") + eef_waypoint_dict[eef_name] = waypoint multi_waypoint = MultiWaypoint(eef_waypoint_dict) - # execute the next waypoints for all eefs + # Execute the next waypoints for all 
eefs exec_results = await multi_waypoint.execute( env=self.env, success_term=success_term, @@ -708,7 +874,7 @@ env_action_queue=env_action_queue, ) - # update execution state buffers + # Update execution state buffers if len(exec_results["states"]) > 0: generated_states.extend(exec_results["states"]) generated_obs.extend(exec_results["observations"]) @@ -720,7 +886,7 @@ subtask_ind = current_eef_subtask_indices[eef_name] if current_eef_subtask_step_indices[eef_name] == len( current_eef_subtask_trajectories[eef_name] - ):  # subtask done + ):  # Subtask done if (eef_name, subtask_ind) in runtime_subtask_constraints_dict: task_constraint = runtime_subtask_constraints_dict[(eef_name, subtask_ind)] if task_constraint["type"] == SubTaskConstraintType._SEQUENTIAL_FORMER: @@ -732,9 +898,9 @@ elif task_constraint["type"] == SubTaskConstraintType.COORDINATION: concurrent_task_spec_key = task_constraint["concurrent_task_spec_key"] concurrent_subtask_ind = task_constraint["concurrent_subtask_ind"] # concurrent_task_spec_idx = task_spec_keys.index(concurrent_task_spec_key) task_constraint["finished"] = True - # check if concurrent task has been finished + # Check if concurrent task has been finished assert ( runtime_subtask_constraints_dict[(concurrent_task_spec_key, concurrent_subtask_ind)][ "finished" @@ -762,11 +928,11 @@ if all(eef_subtasks_done.values()): break - # merge numpy arrays + # Merge the collected action tensors if len(generated_actions) > 0: generated_actions = torch.cat(generated_actions, dim=0) - # set success to the recorded episode data and export to file + # Set success to the recorded episode data and export to file self.env.recorder_manager.set_success_to_episodes( env_id_tensor, torch.tensor([[generated_success]], dtype=torch.bool, device=self.env.device) ) @@ -781,3 +947,33 @@ success=generated_success, ) return results + + def _convert_planned_trajectory_to_waypoints( + self, motion_planner: Any, gripper_action: torch.Tensor + ) -> list[Waypoint]: + """ + (skillgen) Convert a motion planner's output trajectory into a list of Waypoint objects. + + The motion planner provides a sequence of planned 4x4 poses. This method wraps each + pose into a `Waypoint`, pairing it with the provided `gripper_action` and an optional + per-timestep noise value sourced from the planner config (`motion_noise_scale`). + + Args: + motion_planner: Planner instance exposing `get_planned_poses()` and an optional + `config.motion_noise_scale` float. + gripper_action: Gripper actuation to associate with each planned pose. + + Returns: + list[Waypoint]: Sequence of waypoints corresponding to the planned trajectory.
+ """ + # Get motion noise scale from the planner's configuration + motion_noise_scale = getattr(motion_planner.config, "motion_noise_scale", 0.0) + + waypoints = [] + planned_poses = motion_planner.get_planned_poses() + + for planned_pose in planned_poses: + waypoint = Waypoint(pose=planned_pose, gripper_action=gripper_action, noise=motion_noise_scale) + waypoints.append(waypoint) + + return waypoints diff --git a/source/isaaclab_mimic/isaaclab_mimic/datagen/datagen_info.py b/source/isaaclab_mimic/isaaclab_mimic/datagen/datagen_info.py index 5dcf5196d20..66faa8cc138 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/datagen/datagen_info.py +++ b/source/isaaclab_mimic/isaaclab_mimic/datagen/datagen_info.py @@ -20,6 +20,7 @@ class DatagenInfo: Core Elements: - **eef_pose**: Captures the current 6 dimensional poses of the robot's end-effector. - **object_poses**: Captures the 6 dimensional poses of relevant objects in the scene. + - **subtask_start_signals**: Captures subtask start signals. Used by skillgen to identify the precise start of a subtask from a demonstration. - **subtask_term_signals**: Captures subtask completions signals. - **target_eef_pose**: Captures the target 6 dimensional poses for robot's end effector at each time step. - **gripper_action**: Captures the gripper's state. @@ -30,6 +31,7 @@ def __init__( eef_pose=None, object_poses=None, subtask_term_signals=None, + subtask_start_signals=None, target_eef_pose=None, gripper_action=None, ): @@ -38,6 +40,9 @@ def __init__( eef_pose (torch.Tensor or None): robot end effector poses of shape [..., 4, 4] object_poses (dict or None): dictionary mapping object name to object poses of shape [..., 4, 4] + subtask_start_signals (dict or None): dictionary mapping subtask name to a binary + indicator (0 or 1) on whether subtask has started. This is required when using skillgen. + Each value in the dictionary could be an int, float, or torch.Tensor of shape [..., 1]. subtask_term_signals (dict or None): dictionary mapping subtask name to a binary indicator (0 or 1) on whether subtask has been completed. Each value in the dictionary could be an int, float, or torch.Tensor of shape [..., 1]. @@ -53,6 +58,17 @@ def __init__( if object_poses is not None: self.object_poses = {k: object_poses[k] for k in object_poses} + # When using skillgen, demonstrations must be annotated with subtask start signals. 
+ self.subtask_start_signals = None + if subtask_start_signals is not None: + self.subtask_start_signals = dict() + for k in subtask_start_signals: + # Values may be scalars (int/float) or tensors; store them as provided. + self.subtask_start_signals[k] = subtask_start_signals[k] + self.subtask_term_signals = None if subtask_term_signals is not None: self.subtask_term_signals = dict() @@ -80,6 +96,8 @@ ret["eef_pose"] = self.eef_pose if self.object_poses is not None: ret["object_poses"] = deepcopy(self.object_poses) + if self.subtask_start_signals is not None: + ret["subtask_start_signals"] = deepcopy(self.subtask_start_signals) if self.subtask_term_signals is not None: ret["subtask_term_signals"] = deepcopy(self.subtask_term_signals) if self.target_eef_pose is not None: diff --git a/source/isaaclab_mimic/isaaclab_mimic/datagen/datagen_info_pool.py b/source/isaaclab_mimic/isaaclab_mimic/datagen/datagen_info_pool.py index 348f4dd4a2a..ca95299d8ea 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/datagen/datagen_info_pool.py +++ b/source/isaaclab_mimic/isaaclab_mimic/datagen/datagen_info_pool.py @@ -40,10 +40,15 @@ def __init__(self, env, env_cfg, device, asyncio_lock: asyncio.Lock | None = Non # Subtask termination infos for the given environment self.subtask_term_signal_names: dict[str, list[str]] = {} self.subtask_term_offset_ranges: dict[str, list[tuple[int, int]]] = {} + self.subtask_start_offset_ranges: dict[str, list[tuple[int, int]]] = {} + for eef_name, eef_subtask_configs in env_cfg.subtask_configs.items(): self.subtask_term_signal_names[eef_name] = [ subtask_config.subtask_term_signal for subtask_config in eef_subtask_configs ] + self.subtask_start_offset_ranges[eef_name] = [ + subtask_config.subtask_start_offset_range for subtask_config in eef_subtask_configs + ] self.subtask_term_offset_ranges[eef_name] = [ subtask_config.subtask_term_offset_range for subtask_config in eef_subtask_configs ] @@ -90,12 +95,14 @@ def _add_episode(self, episode: EpisodeData): """ ep_grp = episode.data - # extract datagen info + # Extract datagen info if "datagen_info" in ep_grp["obs"]: eef_pose = ep_grp["obs"]["datagen_info"]["eef_pose"] object_poses_dict = ep_grp["obs"]["datagen_info"]["object_pose"] target_eef_pose = ep_grp["obs"]["datagen_info"]["target_eef_pose"] subtask_term_signals_dict = ep_grp["obs"]["datagen_info"]["subtask_term_signals"] + # subtask_start_signals is optional + subtask_start_signals_dict = ep_grp["obs"]["datagen_info"].get("subtask_start_signals") else: raise ValueError("Episode to be loaded to DatagenInfo pool lacks datagen_info annotations") @@ -105,63 +112,97 @@ ep_datagen_info_obj = DatagenInfo( eef_pose=eef_pose, object_poses=object_poses_dict, + subtask_start_signals=subtask_start_signals_dict, subtask_term_signals=subtask_term_signals_dict, target_eef_pose=target_eef_pose, gripper_action=gripper_actions, ) self._datagen_infos.append(ep_datagen_info_obj) - # parse subtask ranges using subtask termination signals and store + # Parse subtask ranges using subtask termination signals and store # the start and end indices of each subtask for each eef for eef_name in self.subtask_term_signal_names.keys(): if eef_name not in self._subtask_boundaries: self._subtask_boundaries[eef_name] = [] - prev_subtask_term_ind = 0 + prev_subtask_term_index = 0 eef_subtask_boundaries = [] - for
subtask_term_signal_name in self.subtask_term_signal_names[eef_name]: - if subtask_term_signal_name is None: - # None refers to the final subtask, so finishes at end of demo - subtask_term_ind = ep_grp["actions"].shape[0] + for eef_subtask_index, eef_subtask_signal_name in enumerate(self.subtask_term_signal_names[eef_name]): + if self.env_cfg.datagen_config.use_skillgen: + # For skillgen, the start of a subtask is explicitly defined in the demonstration data. + if ep_datagen_info_obj.subtask_start_signals is None: + raise ValueError( + "subtask_start_signals field is not present in datagen_info for subtask" + f" {eef_subtask_signal_name} in the loaded episode when use_skillgen is enabled" + ) + # Find the first time step where the start signal transitions from 0 to 1. + subtask_start_indicators = ( + ep_datagen_info_obj.subtask_start_signals[eef_subtask_signal_name].flatten().int() + ) + # Compute the difference between consecutive elements to find the transition point. + diffs = subtask_start_indicators[1:] - subtask_start_indicators[:-1] + # The first non-zero element's index gives the start of the subtask. + start_index = int(diffs.nonzero()[0][0]) + 1 + else: + # Without skillgen, subtasks are assumed to be sequential. + start_index = prev_subtask_term_index + + if eef_subtask_index == len(self.subtask_term_signal_names[eef_name]) - 1: + # Last subtask has no termination signal from the datagen_info + end_index = ep_grp["actions"].shape[0] else: - # trick to detect index where first 0 -> 1 transition occurs - this will be the end of the subtask - subtask_indicators = ( - ep_datagen_info_obj.subtask_term_signals[subtask_term_signal_name].flatten().int() + # Trick to detect index where first 0 -> 1 transition occurs - this will be the end of the subtask + subtask_term_indicators = ( + ep_datagen_info_obj.subtask_term_signals[eef_subtask_signal_name].flatten().int() ) - diffs = subtask_indicators[1:] - subtask_indicators[:-1] - end_ind = int(diffs.nonzero()[0][0]) + 1 - subtask_term_ind = end_ind + 1 # increment to support indexing like demo[start:end] + diffs = subtask_term_indicators[1:] - subtask_term_indicators[:-1] + end_index = int(diffs.nonzero()[0][0]) + 1 + end_index = end_index + 1 # increment to support indexing like demo[start:end] - if subtask_term_ind <= prev_subtask_term_ind: + if end_index <= start_index: raise ValueError( - f"subtask termination signal is not increasing: {subtask_term_ind} should be greater than" - f" {prev_subtask_term_ind}" + f"subtask termination signal is not increasing: {end_index} should be greater than" + f" {start_index}" ) - eef_subtask_boundaries.append((prev_subtask_term_ind, subtask_term_ind)) - prev_subtask_term_ind = subtask_term_ind - - # run sanity check on subtask_term_offset_range in task spec to make sure we can never - # get an empty subtask in the worst case when sampling subtask bounds: - # - # end index of subtask i + max offset of subtask i < end index of subtask i + 1 + min offset of subtask i + 1 - # - for i in range(1, len(eef_subtask_boundaries)): - prev_max_offset_range = self.subtask_term_offset_ranges[eef_name][i - 1][1] - assert ( - eef_subtask_boundaries[i - 1][1] + prev_max_offset_range - < eef_subtask_boundaries[i][1] + self.subtask_term_offset_ranges[eef_name][i][0] - ), ( - "subtask sanity check violation in demo with subtask {} end ind {}, subtask {} max offset {}," - " subtask {} end ind {}, and subtask {} min offset {}".format( - i - 1, - eef_subtask_boundaries[i - 1][1], - i - 1, - prev_max_offset_range, - i, - 
eef_subtask_boundaries[i][1], - i, - self.subtask_term_offset_ranges[eef_name][i][0], + eef_subtask_boundaries.append((start_index, end_index)) + prev_subtask_term_index = end_index + + if self.env_cfg.datagen_config.use_skillgen: + # With skillgen, both start and end boundaries can be randomized. + # This checks if the randomized boundaries could result in an invalid (e.g., empty) subtask. + for i in range(len(eef_subtask_boundaries)): + # Ensure that a subtask is not empty in the worst-case randomization scenario. + assert ( + eef_subtask_boundaries[i][0] + self.subtask_start_offset_ranges[eef_name][i][1] + < eef_subtask_boundaries[i][1] + self.subtask_term_offset_ranges[eef_name][i][0] + ), f"subtask {i} is empty in the worst case" + if i == len(eef_subtask_boundaries) - 1: + break + # Make sure that subtasks are not overlapped with the largest offsets + assert ( + eef_subtask_boundaries[i][1] + self.subtask_term_offset_ranges[eef_name][i][1] + < eef_subtask_boundaries[i + 1][0] + self.subtask_start_offset_ranges[eef_name][i + 1][0] + ), f"subtasks {i} and {i + 1} are overlapped with the largest offsets" + else: + # Run sanity check on subtask_term_offset_range in task spec + for i in range(1, len(eef_subtask_boundaries)): + prev_max_offset_range = self.subtask_term_offset_ranges[eef_name][i - 1][1] + # Make sure that subtasks are not overlapped with the largest offsets + assert ( + eef_subtask_boundaries[i - 1][1] + prev_max_offset_range + < eef_subtask_boundaries[i][1] + self.subtask_term_offset_ranges[eef_name][i][0] + ), ( + "subtask sanity check violation in demo with subtask {} end ind {}, subtask {} max offset {}," + " subtask {} end ind {}, and subtask {} min offset {}".format( + i - 1, + eef_subtask_boundaries[i - 1][1], + i - 1, + prev_max_offset_range, + i, + eef_subtask_boundaries[i][1], + i, + self.subtask_term_offset_ranges[eef_name][i][0], + ) ) - ) self._subtask_boundaries[eef_name].append(eef_subtask_boundaries) diff --git a/source/isaaclab_mimic/isaaclab_mimic/datagen/generation.py b/source/isaaclab_mimic/isaaclab_mimic/datagen/generation.py index 2866a217f03..6abdc088170 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/datagen/generation.py +++ b/source/isaaclab_mimic/isaaclab_mimic/datagen/generation.py @@ -31,6 +31,7 @@ async def run_data_generator( data_generator: DataGenerator, success_term: TerminationTermCfg, pause_subtask: bool = False, + motion_planner: Any = None, ): """Run mimic data generation from the given data generator in the specified environment index. @@ -42,6 +43,7 @@ async def run_data_generator( data_generator: The data generator instance to use. success_term: The success termination term to use. pause_subtask: Whether to pause the subtask during generation. + motion_planner: The motion planner to use. """ global num_success, num_failures, num_attempts while True: @@ -51,6 +53,7 @@ async def run_data_generator( env_reset_queue=env_reset_queue, env_action_queue=env_action_queue, pause_subtask=pause_subtask, + motion_planner=motion_planner, ) if bool(results["success"]): num_success += 1 @@ -190,7 +193,12 @@ def setup_env_config( def setup_async_generation( - env: Any, num_envs: int, input_file: str, success_term: Any, pause_subtask: bool = False + env: Any, + num_envs: int, + input_file: str, + success_term: Any, + pause_subtask: bool = False, + motion_planners: Any = None, ) -> dict[str, Any]: """Setup async data generation tasks. 
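+
+    Example (illustrative sketch; ``make_planner`` is a hypothetical per-env
+    planner factory, and the dataset path is a placeholder):
+
+        >>> planners = [make_planner(env, i) for i in range(num_envs)]
+        >>> async_cfg = setup_async_generation(
+        ...     env, num_envs, "dataset.hdf5", success_term, motion_planners=planners
+        ... )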
@@ -200,6 +208,7 @@ def setup_async_generation( input_file: Path to input dataset file success_term: Success termination condition pause_subtask: Whether to pause after subtasks + motion_planners: Motion planner instances for all environments Returns: List of asyncio tasks for data generation @@ -216,9 +225,17 @@ def setup_async_generation( data_generator = DataGenerator(env=env, src_demo_datagen_info_pool=shared_datagen_info_pool) data_generator_asyncio_tasks = [] for i in range(num_envs): + env_motion_planner = motion_planners[i] if motion_planners else None task = asyncio_event_loop.create_task( run_data_generator( - env, i, env_reset_queue, env_action_queue, data_generator, success_term, pause_subtask=pause_subtask + env, + i, + env_reset_queue, + env_action_queue, + data_generator, + success_term, + pause_subtask=pause_subtask, + motion_planner=env_motion_planner, ) ) data_generator_asyncio_tasks.append(task) diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/__init__.py b/source/isaaclab_mimic/isaaclab_mimic/envs/__init__.py index dedd20c75bf..008f6968099 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/envs/__init__.py +++ b/source/isaaclab_mimic/isaaclab_mimic/envs/__init__.py @@ -7,11 +7,13 @@ import gymnasium as gym +from .franka_bin_stack_ik_rel_mimic_env_cfg import FrankaBinStackIKRelMimicEnvCfg from .franka_stack_ik_abs_mimic_env import FrankaCubeStackIKAbsMimicEnv from .franka_stack_ik_abs_mimic_env_cfg import FrankaCubeStackIKAbsMimicEnvCfg from .franka_stack_ik_rel_blueprint_mimic_env_cfg import FrankaCubeStackIKRelBlueprintMimicEnvCfg from .franka_stack_ik_rel_mimic_env import FrankaCubeStackIKRelMimicEnv from .franka_stack_ik_rel_mimic_env_cfg import FrankaCubeStackIKRelMimicEnvCfg +from .franka_stack_ik_rel_skillgen_env_cfg import FrankaCubeStackIKRelSkillgenEnvCfg from .franka_stack_ik_rel_visuomotor_cosmos_mimic_env_cfg import FrankaCubeStackIKRelVisuomotorCosmosMimicEnvCfg from .franka_stack_ik_rel_visuomotor_mimic_env_cfg import FrankaCubeStackIKRelVisuomotorMimicEnvCfg @@ -65,3 +67,21 @@ }, disable_env_checker=True, ) + +gym.register( + id="Isaac-Stack-Cube-Franka-IK-Rel-Skillgen-v0", + entry_point="isaaclab_mimic.envs:FrankaCubeStackIKRelMimicEnv", + kwargs={ + "env_cfg_entry_point": franka_stack_ik_rel_skillgen_env_cfg.FrankaCubeStackIKRelSkillgenEnvCfg, + }, + disable_env_checker=True, +) + +gym.register( + id="Isaac-Stack-Cube-Bin-Franka-IK-Rel-Mimic-v0", + entry_point="isaaclab_mimic.envs:FrankaCubeStackIKRelMimicEnv", + kwargs={ + "env_cfg_entry_point": franka_bin_stack_ik_rel_mimic_env_cfg.FrankaBinStackIKRelMimicEnvCfg, + }, + disable_env_checker=True, +) diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/franka_bin_stack_ik_rel_mimic_env_cfg.py b/source/isaaclab_mimic/isaaclab_mimic/envs/franka_bin_stack_ik_rel_mimic_env_cfg.py new file mode 100644 index 00000000000..ba40bd620e0 --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/envs/franka_bin_stack_ik_rel_mimic_env_cfg.py @@ -0,0 +1,93 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from isaaclab.envs.mimic_env_cfg import MimicEnvCfg, SubTaskConfig
+from isaaclab.utils import configclass
+
+from isaaclab_tasks.manager_based.manipulation.stack.config.franka.bin_stack_ik_rel_env_cfg import FrankaBinStackEnvCfg
+
+
+@configclass
+class FrankaBinStackIKRelMimicEnvCfg(FrankaBinStackEnvCfg, MimicEnvCfg):
+    """
+    Isaac Lab Mimic environment config class for the Franka Bin Stack IK Rel env.
+    """
+
+    def __post_init__(self):
+
+        # post init of parents
+        super().__post_init__()
+
+        # Override the existing values
+        self.datagen_config.name = "demo_src_stack_isaac_lab_task_D0"
+        self.datagen_config.generation_guarantee = True
+        self.datagen_config.generation_keep_failed = True
+        self.datagen_config.generation_num_trials = 10
+        self.datagen_config.generation_select_src_per_subtask = True
+        self.datagen_config.generation_transform_first_robot_pose = False
+        self.datagen_config.generation_interpolate_from_last_target_pose = True
+        self.datagen_config.generation_relative = True
+        self.datagen_config.max_num_failures = 25
+        self.datagen_config.seed = 1
+
+        # The following are the subtask configurations for the stack task.
+        subtask_configs = []
+        subtask_configs.append(
+            SubTaskConfig(
+                object_ref="cube_2",
+                subtask_term_signal="grasp_1",
+                subtask_term_offset_range=(0, 0),
+                selection_strategy="nearest_neighbor_object",
+                selection_strategy_kwargs={"nn_k": 3},
+                action_noise=0.03,
+                num_interpolation_steps=0,
+                num_fixed_steps=0,
+                apply_noise_during_interpolation=False,
+                description="Grasp red cube",
+                next_subtask_description="Stack red cube on top of blue cube",
+            )
+        )
+        subtask_configs.append(
+            SubTaskConfig(
+                object_ref="cube_1",
+                subtask_term_signal="stack_1",
+                subtask_term_offset_range=(0, 0),
+                selection_strategy="nearest_neighbor_object",
+                selection_strategy_kwargs={"nn_k": 3},
+                action_noise=0.03,
+                num_interpolation_steps=0,
+                num_fixed_steps=0,
+                apply_noise_during_interpolation=False,
+                next_subtask_description="Grasp green cube",
+            )
+        )
+        subtask_configs.append(
+            SubTaskConfig(
+                object_ref="cube_3",
+                subtask_term_signal="grasp_2",
+                subtask_term_offset_range=(0, 0),
+                selection_strategy="nearest_neighbor_object",
+                selection_strategy_kwargs={"nn_k": 3},
+                action_noise=0.03,
+                num_interpolation_steps=0,
+                num_fixed_steps=0,
+                apply_noise_during_interpolation=False,
+                next_subtask_description="Stack green cube on top of red cube",
+            )
+        )
+        subtask_configs.append(
+            SubTaskConfig(
+                object_ref="cube_2",
+                subtask_term_signal="stack_2",
+                subtask_term_offset_range=(0, 0),
+                selection_strategy="nearest_neighbor_object",
+                selection_strategy_kwargs={"nn_k": 3},
+                action_noise=0.03,
+                num_interpolation_steps=0,
+                num_fixed_steps=0,
+                apply_noise_during_interpolation=False,
+            )
+        )
+        self.subtask_configs["franka"] = subtask_configs
diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_mimic_env.py b/source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_mimic_env.py
index ee442267e93..aba935a759f 100644
--- a/source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_mimic_env.py
+++ b/source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_mimic_env.py
@@ -163,3 +163,26 @@ def get_subtask_term_signals(self, env_ids: Sequence[int] | None = None) -> dict
        signals["stack_1"] = subtask_terms["stack_1"][env_ids]
        # final subtask is placing cubeC on cubeA (motion relative to cubeA) - but final subtask signal is not needed
        return signals
+
+    def get_expected_attached_object(self, eef_name: 
str, subtask_index: int, env_cfg) -> str | None: + """ + (SkillGen) Return the expected attached object for the given EEF/subtask. + + Assumes 'stack' subtasks place the object grasped in the preceding 'grasp' subtask. + Returns None for 'grasp' (or others) at subtask start. + """ + if eef_name not in env_cfg.subtask_configs: + return None + + subtask_configs = env_cfg.subtask_configs[eef_name] + if not (0 <= subtask_index < len(subtask_configs)): + return None + + current_cfg = subtask_configs[subtask_index] + # If stacking, expect we are holding the object grasped in the prior subtask + if "stack" in str(current_cfg.subtask_term_signal).lower(): + if subtask_index > 0: + prev_cfg = subtask_configs[subtask_index - 1] + if "grasp" in str(prev_cfg.subtask_term_signal).lower(): + return prev_cfg.object_ref + return None diff --git a/source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_skillgen_env_cfg.py b/source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_skillgen_env_cfg.py new file mode 100644 index 00000000000..dc344513fa9 --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/envs/franka_stack_ik_rel_skillgen_env_cfg.py @@ -0,0 +1,135 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +from isaaclab.envs.mimic_env_cfg import MimicEnvCfg, SubTaskConfig +from isaaclab.utils import configclass + +from isaaclab_tasks.manager_based.manipulation.stack.config.franka.stack_ik_rel_env_cfg import FrankaCubeStackEnvCfg + + +@configclass +class FrankaCubeStackIKRelSkillgenEnvCfg(FrankaCubeStackEnvCfg, MimicEnvCfg): + """ + Isaac Lab Mimic environment config class for Franka Cube Stack IK Rel env. + """ + + def __post_init__(self): + # post init of parents + super().__post_init__() + # # TODO: Figure out how we can move this to the MimicEnvCfg class + # # The __post_init__() above only calls the init for FrankaCubeStackEnvCfg and not MimicEnvCfg + # # https://stackoverflow.com/questions/59986413/achieving-multiple-inheritance-using-python-dataclasses + + # Override the existing values + self.datagen_config.name = "demo_src_stack_isaac_lab_task_D0" + self.datagen_config.generation_guarantee = True + self.datagen_config.generation_keep_failed = True + self.datagen_config.generation_num_trials = 10 + self.datagen_config.generation_select_src_per_subtask = True + self.datagen_config.generation_transform_first_robot_pose = False + self.datagen_config.generation_interpolate_from_last_target_pose = True + self.datagen_config.generation_relative = True + self.datagen_config.max_num_failures = 25 + self.datagen_config.seed = 1 + + # The following are the subtask configurations for the stack task. + subtask_configs = [] + subtask_configs.append( + SubTaskConfig( + # Each subtask involves manipulation with respect to a single object frame. + object_ref="cube_2", + # This key corresponds to the binary indicator in "datagen_info" that signals + # when this subtask is finished (e.g., on a 0 to 1 edge). + subtask_term_signal="grasp_1", + # Specifies time offsets for data generation when splitting a trajectory into + # subtask segments. Random offsets are added to the termination boundary. 
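+            # When use_skillgen is enabled, the segment start can also be offset via
+            # subtask_start_offset_range (see datagen_info_pool for how both are used).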
+ subtask_term_offset_range=(0, 0), # (10, 20), + # Selection strategy for the source subtask segment during data generation + selection_strategy="nearest_neighbor_object", + # Optional parameters for the selection strategy function + selection_strategy_kwargs={"nn_k": 3}, + # Amount of action noise to apply during this subtask + action_noise=0.03, + # Number of interpolation steps to bridge to this subtask segment + num_interpolation_steps=0, # 5, + # Additional fixed steps for the robot to reach the necessary pose + num_fixed_steps=0, + # If True, apply action noise during the interpolation phase and execution + apply_noise_during_interpolation=False, + description="Grasp red cube", + next_subtask_description="Stack red cube on top of blue cube", + ) + ) + subtask_configs.append( + SubTaskConfig( + # Each subtask involves manipulation with respect to a single object frame. + object_ref="cube_1", + # Corresponding key for the binary indicator in "datagen_info" for completion + subtask_term_signal="stack_1", + # Time offsets for data generation when splitting a trajectory + subtask_term_offset_range=(0, 0), # (10, 20), + # Selection strategy for source subtask segment + selection_strategy="nearest_neighbor_object", + # Optional parameters for the selection strategy function + selection_strategy_kwargs={"nn_k": 3}, + # Amount of action noise to apply during this subtask + action_noise=0.03, + # Number of interpolation steps to bridge to this subtask segment + num_interpolation_steps=0, # 5, + # Additional fixed steps for the robot to reach the necessary pose + num_fixed_steps=0, + # If True, apply action noise during the interpolation phase and execution + apply_noise_during_interpolation=False, + next_subtask_description="Grasp green cube", + ) + ) + subtask_configs.append( + SubTaskConfig( + # Each subtask involves manipulation with respect to a single object frame. + object_ref="cube_3", + # Corresponding key for the binary indicator in "datagen_info" for completion + subtask_term_signal="grasp_2", + # Time offsets for data generation when splitting a trajectory + subtask_term_offset_range=(0, 0), # (10, 20), + # Selection strategy for source subtask segment + selection_strategy="nearest_neighbor_object", + # Optional parameters for the selection strategy function + selection_strategy_kwargs={"nn_k": 3}, + # Amount of action noise to apply during this subtask + action_noise=0.03, + # Number of interpolation steps to bridge to this subtask segment + num_interpolation_steps=0, # 5, + # Additional fixed steps for the robot to reach the necessary pose + num_fixed_steps=0, + # If True, apply action noise during the interpolation phase and execution + apply_noise_during_interpolation=False, + next_subtask_description="Stack green cube on top of red cube", + ) + ) + subtask_configs.append( + SubTaskConfig( + # Each subtask involves manipulation with respect to a single object frame. 
+ object_ref="cube_2", + # End of final subtask does not need to be detected for MimicGen + # Needs to be detected for SkillGen + # Setting this doesn't affect the data generation for MimicGen + subtask_term_signal="stack_2", + # No time offsets for the final subtask + subtask_term_offset_range=(0, 0), + # Selection strategy for source subtask segment + selection_strategy="nearest_neighbor_object", + # Optional parameters for the selection strategy function + selection_strategy_kwargs={"nn_k": 3}, + # Amount of action noise to apply during this subtask + action_noise=0.03, + # Number of interpolation steps to bridge to this subtask segment + num_interpolation_steps=0, # 5, + # Additional fixed steps for the robot to reach the necessary pose + num_fixed_steps=0, + # If True, apply action noise during the interpolation phase and execution + apply_noise_during_interpolation=False, + ) + ) + self.subtask_configs["franka"] = subtask_configs diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/base_motion_planner.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/base_motion_planner.py new file mode 100644 index 00000000000..1caa7bc8b08 --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/base_motion_planner.py @@ -0,0 +1,130 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +import torch +from abc import ABC, abstractmethod +from typing import Any + +from isaaclab.assets import Articulation +from isaaclab.envs.manager_based_env import ManagerBasedEnv + + +class MotionPlanner(ABC): + """Abstract base class for motion planners. + + This class defines the public interface that all motion planners must implement. + It focuses on the essential functionality that users interact with, while leaving + implementation details to specific planner backends. + + The core workflow is: + 1. Initialize planner with environment and robot + 2. Call update_world_and_plan_motion() to plan to a target + 3. Execute plan using has_next_waypoint() and get_next_waypoint_ee_pose() + + Example: + >>> planner = AnyMotionPlanner(env, robot) + >>> success = planner.update_world_and_plan_motion(target_pose) + >>> if success: + >>> while planner.has_next_waypoint(): + >>> action = planner.get_next_waypoint_ee_pose() + >>> obs, info = env.step(action) + """ + + def __init__( + self, env: ManagerBasedEnv, robot: Articulation, env_id: int = 0, debug: bool = False, **kwargs + ) -> None: + """Initialize the motion planner. + + Args: + env: The environment instance + robot: Robot articulation to plan motions for + env_id: Environment ID (0 to num_envs-1) + debug: Whether to print detailed debugging information + **kwargs: Additional planner-specific arguments + """ + self.env = env + self.robot = robot + self.env_id = env_id + self.debug = debug + + @abstractmethod + def update_world_and_plan_motion(self, target_pose: torch.Tensor, **kwargs) -> bool: + """Update collision world and plan motion to target pose. + + This is the main entry point for motion planning. It should: + 1. Update the planner's internal world representation + 2. Plan a collision-free path to the target pose + 3. 
Store the plan internally for execution
+
+        Args:
+            target_pose: Target pose to plan motion to (4x4 transformation matrix)
+            **kwargs: Planner-specific arguments (e.g., retiming, contact planning)
+
+        Returns:
+            bool: True if planning succeeded, False otherwise
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def has_next_waypoint(self) -> bool:
+        """Check if there are more waypoints in current plan.
+
+        Returns:
+            bool: True if there are more waypoints, False otherwise
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def get_next_waypoint_ee_pose(self) -> Any:
+        """Get next waypoint's end-effector pose from current plan.
+
+        This method should only be called after checking has_next_waypoint().
+
+        Returns:
+            Any: End-effector pose for the next waypoint in the plan.
+        """
+        raise NotImplementedError
+
+    def get_planned_poses(self) -> list[Any]:
+        """Get all planned poses from current plan.
+
+        Returns:
+            list[Any]: List of planned poses.
+
+        Note:
+            Default implementation iterates through waypoints.
+            Child classes can override for a more efficient implementation.
+        """
+        planned_poses = []
+        # This default implementation rewinds the plan and then consumes it: after
+        # this method returns, the waypoint iterator is at the end of the plan.
+        # Callers that still intend to execute the plan should call reset_plan()
+        # again afterwards. Child classes that manage more complex internal state
+        # should override this with a side-effect-free accessor for the full plan.
+        self.reset_plan()
+        while self.has_next_waypoint():
+            pose = self.get_next_waypoint_ee_pose()
+            planned_poses.append(pose)
+        return planned_poses
+
+    @abstractmethod
+    def reset_plan(self) -> None:
+        """Reset the current plan and execution state.
+
+        This should clear any stored plan and reset the execution index or iterator.
+        """
+        raise NotImplementedError
+
+    def get_planner_info(self) -> dict[str, Any]:
+        """Get information about the planner.
+
+        Returns:
+            dict: Information about the planner (name, version, capabilities, etc.)
+        """
+        return {
+            "name": self.__class__.__name__,
+            "env_id": self.env_id,
+            "debug": self.debug,
+        }
diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py
new file mode 100644
index 00000000000..a026b6f033f
--- /dev/null
+++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py
@@ -0,0 +1,1948 @@
+# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+# +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import torch +from dataclasses import dataclass +from typing import Any + +from curobo.geom.sdf.world import CollisionCheckerType +from curobo.geom.sphere_fit import SphereFitType +from curobo.geom.types import WorldConfig +from curobo.types.base import TensorDeviceType +from curobo.types.math import Pose +from curobo.types.state import JointState +from curobo.util.logger import setup_curobo_logger +from curobo.util.usd_helper import UsdHelper +from curobo.util_file import get_robot_configs_path, join_path, load_yaml +from curobo.wrap.reacher.motion_gen import MotionGen, MotionGenConfig, MotionGenPlanConfig + +import isaaclab.utils.math as PoseUtils +from isaaclab.assets import Articulation +from isaaclab.envs.manager_based_env import ManagerBasedEnv +from isaaclab.managers import SceneEntityCfg + +from isaaclab_mimic.motion_planners.base_motion_planner import MotionPlanner +from isaaclab_mimic.motion_planners.curobo.curobo_planner_config import CuroboPlannerConfig +from isaaclab_mimic.motion_planners.curobo.plan_visualizer import PlanVisualizer + + +@dataclass +class Attachment: + """Stores object attachment information for robot manipulation. + + This dataclass tracks the relative pose between an attached object and its parent link, + enabling the robot to maintain consistent object positioning during motion planning. + """ + + pose: Pose # Relative pose from parent link to object + parent: str # Parent link name + + +class CuroboPlanner(MotionPlanner): + """Motion planner for robot manipulation using cuRobo. + + This planner provides collision-aware motion planning capabilities for robotic manipulation tasks. + It integrates with Isaac Lab environments to: + + - Update collision world from current stage state + - Plan collision-free paths to target poses + - Handle object attachment and detachment during manipulation + - Execute planned motions with proper collision checking + + The planner uses cuRobo for fast motion generation and supports + multi-phase planning for contact scenarios like grasping and placing objects. + """ + + def __init__( + self, + env: ManagerBasedEnv, + robot: Articulation, + config: CuroboPlannerConfig, + task_name: str | None = None, + env_id: int = 0, + collision_checker: CollisionCheckerType = CollisionCheckerType.MESH, + num_trajopt_seeds: int = 12, + num_graph_seeds: int = 12, + interpolation_dt: float = 0.05, + ) -> None: + """Initialize the motion planner for a specific environment. + + Sets up the cuRobo motion generator with collision checking, configures the robot model, + and prepares visualization components if enabled. The planner is isolated to CUDA device + regardless of Isaac Lab's device configuration. 
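+
+        Example (illustrative sketch; assumes ``env``, ``robot``, and a
+        ``CuroboPlannerConfig`` instance ``config`` are already constructed):
+
+            >>> planner = CuroboPlanner(env, robot, config, env_id=0)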
+ + Args: + env: The Isaac Lab environment instance containing the robot and scene + robot: Robot articulation to plan motions for + config: Configuration object containing planner parameters and settings + task_name: Task name for auto-configuration + env_id: Environment ID for multi-environment setups (0 to num_envs-1) + collision_checker: Type of collision checker + num_trajopt_seeds: Number of seeds for trajectory optimization + num_graph_seeds: Number of seeds for graph search + interpolation_dt: Time step for interpolating waypoints + """ + # Initialize base class + super().__init__(env=env, robot=robot, env_id=env_id, debug=config.debug_planner) + + # Store instance variables + self.config: CuroboPlannerConfig = config + self.n_repeat: int | None = self.config.n_repeat + self.step_size: float | None = self.config.motion_step_size + self.visualize_plan: bool = self.config.visualize_plan + self.visualize_spheres: bool = self.config.visualize_spheres + + # Print the config parameter values + print(f"Config parameter values: {self.config}") + + # Initialize plan visualizer if enabled + if self.visualize_plan: + # Use env-local base translation for multi-env rendering consistency + env_origin = self.env.scene.env_origins[env_id, :3] + base_translation = (self.robot.data.root_pos_w[env_id, :3] - env_origin).detach().cpu().numpy() + self.plan_visualizer = PlanVisualizer( + robot_name=self.config.robot_name, + recording_id=f"curobo_plan_{env_id}", + debug=self.debug, + base_translation=base_translation, + ) + + # Store attached objects as Attachment objects + self.attached_objects: dict[str, Attachment] = {} # object_name -> Attachment + + # Initialize cuRobo components - FORCE CUDA DEVICE FOR ISOLATION + setup_curobo_logger("warn") + + # Force cuRobo to always use CUDA device regardless of Isaac Lab device + # This isolates the motion planner from Isaac Lab's device configuration + self.tensor_args: TensorDeviceType + if torch.cuda.is_available(): + cuda_device = torch.device("cuda:0") + self.tensor_args = TensorDeviceType(device=cuda_device, dtype=torch.float32) + if self.debug: + print("cuRobo motion planner initialized on CUDA device.") + else: + # Fallback to CPU if CUDA not available, but this may cause issues + self.tensor_args = TensorDeviceType() + print("WARNING: CUDA not available, cuRobo using CPU - this may cause device compatibility issues") + + # Load robot configuration + robot_cfg_path: str = get_robot_configs_path() + robot_cfg: dict[str, Any] = load_yaml(join_path(robot_cfg_path, self.config.robot_config_file))["robot_cfg"] + + # Configure collision spheres + if self.config.collision_spheres_file: + robot_cfg["kinematics"]["collision_spheres"] = self.config.collision_spheres_file + + # Configure extra collision spheres + if self.config.extra_collision_spheres: + robot_cfg["kinematics"]["extra_collision_spheres"] = self.config.extra_collision_spheres + + self.robot_cfg: dict[str, Any] = robot_cfg + + # Load world configuration using the config's method + world_cfg: WorldConfig = self.config.get_world_config() + + # Create motion generator config with parameters from configuration + motion_gen_config: MotionGenConfig = MotionGenConfig.load_from_robot_config( + robot_cfg, + world_cfg, + tensor_args=self.tensor_args, + collision_checker_type=self.config.collision_checker_type, + num_trajopt_seeds=self.config.num_trajopt_seeds, + num_graph_seeds=self.config.num_graph_seeds, + interpolation_dt=self.config.interpolation_dt, + collision_cache=self.config.collision_cache_size, + 
trajopt_tsteps=self.config.trajopt_tsteps, + collision_activation_distance=self.config.collision_activation_distance, + position_threshold=self.config.position_threshold, + rotation_threshold=self.config.rotation_threshold, + ) + + # Create motion generator + self.motion_gen: MotionGen = MotionGen(motion_gen_config) + + # Set motion generator reference for plan visualizer if enabled + if self.visualize_plan: + self.plan_visualizer.set_motion_generator_reference(self.motion_gen) + + # Create plan config with parameters from configuration + self.plan_config: MotionGenPlanConfig = MotionGenPlanConfig( + enable_graph=self.config.enable_graph, + enable_graph_attempt=self.config.enable_graph_attempt, + max_attempts=self.config.max_planning_attempts, + enable_finetune_trajopt=self.config.enable_finetune_trajopt, + time_dilation_factor=self.config.time_dilation_factor, + ) + + # Create USD helper + self.usd_helper: UsdHelper = UsdHelper() + self.usd_helper.load_stage(env.scene.stage) + + # Initialize planning state + self._current_plan: JointState | None = None + self._plan_index: int = 0 + + # Initialize visualization state + self.frame_counter: int = 0 + self.spheres: list[tuple[str, float]] | None = None + self.sphere_update_freq: int = self.config.sphere_update_freq + + # Warm up planner + print("Warming up motion planner...") + self.motion_gen.warmup(enable_graph=True, warmup_js_trajopt=False) + + # Read static world geometry once + self._initialize_static_world() + + # Defer object validation baseline until first update_world() call when scene is fully loaded + self._expected_objects: set[str] | None = None + + # Define supported cuRobo primitive types for object discovery and pose synchronization + self.primitive_types: list[str] = ["mesh", "cuboid", "sphere", "capsule", "cylinder", "voxel", "blox"] + + # Cache object mappings + # Only recompute when objects are added/removed, not when poses change + self._cached_object_mappings: dict[str, str] | None = None + + self.counter = 0 + + # ===================================================================================== + # DEVICE CONVERSION UTILITIES + # ===================================================================================== + + def _to_curobo_device(self, tensor: torch.Tensor) -> torch.Tensor: + """Convert tensor to cuRobo device for isolated device management. + + Ensures all tensors used by cuRobo are on CUDA device, providing device isolation + from Isaac Lab's potentially different device configuration. This prevents device + mismatch errors and optimizes cuRobo performance. + + Args: + tensor: Input tensor (may be on any device) + + Returns: + Tensor converted to cuRobo's CUDA device with appropriate dtype + """ + return tensor.to(device=self.tensor_args.device, dtype=self.tensor_args.dtype) + + def _to_env_device(self, tensor: torch.Tensor) -> torch.Tensor: + """Convert tensor back to environment device for Isaac Lab compatibility. + + Converts cuRobo tensors back to the environment's device to ensure compatibility + with Isaac Lab operations that expect tensors on the environment's configured device. 
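+
+        Example (illustrative round trip through the planner's CUDA device;
+        note that ``_to_curobo_device`` casts to float32):
+
+            >>> x = torch.zeros(3, device=self.env.device)
+            >>> self._to_env_device(self._to_curobo_device(x)).device == x.device
+            True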
+ + Args: + tensor: Input tensor from cuRobo operations (typically on CUDA) + + Returns: + Tensor converted to environment's device while preserving dtype + """ + return tensor.to(device=self.env.device, dtype=tensor.dtype) + + # ===================================================================================== + # INITIALIZATION AND CONFIGURATION + # ===================================================================================== + + def _initialize_static_world(self): + """Initialize static world geometry from USD stage. + + Reads static environment geometry once during planner initialization to establish + the base collision world. This includes walls, tables, bins, and other fixed obstacles + that don't change during the simulation. Dynamic objects are synchronized separately + in update_world() to maintain performance. + """ + env_prim_path = f"/World/envs/env_{self.env_id}" + robot_prim_path = f"{env_prim_path}/Robot" + + self._static_world_config = self.usd_helper.get_obstacles_from_stage( + only_paths=[env_prim_path], + reference_prim_path=robot_prim_path, + ignore_substring=[ + f"{env_prim_path}/Robot", + f"{env_prim_path}/target", + "/World/defaultGroundPlane", + "/curobo", + ], + ) + self._static_world_config = self._static_world_config.get_collision_check_world() + + # Initialize cuRobo world with static geometry + self.motion_gen.update_world(self._static_world_config) + + # ===================================================================================== + # PROPERTIES AND BASIC GETTERS + # ===================================================================================== + + @property + def attached_link(self) -> str: + """Default link name for object attachment operations.""" + return self.config.attached_object_link_name + + @property + def attachment_links(self) -> set[str]: + """Set of parent link names that currently have attached objects.""" + return {attachment.parent for attachment in self.attached_objects.values()} + + @property + def current_plan(self) -> JointState | None: + """Current plan from cuRobo motion generator.""" + return self._current_plan + + # ===================================================================================== + # WORLD AND OBJECT MANAGEMENT, ATTACHMENT, AND DETACHMENT + # ===================================================================================== + + def get_object_pose(self, object_name: str) -> Pose | None: + """Retrieve object pose from cuRobo's collision world model. + + Searches the collision world model for the specified object and returns its current + pose. This is useful for attachment calculations and debugging collision world state. + The method handles both mesh and cuboid object types automatically. 
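+
+        Example (illustrative; assumes the scene contains an object named "cube_1"):
+
+            >>> pose = planner.get_object_pose("cube_1")
+            >>> if pose is not None:
+            ...     print(pose.position)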
+ + Args: + object_name: Short object name used in Isaac Lab scene (e.g., "cube_1") + + Returns: + Object pose in cuRobo coordinate frame, or None if object not found + """ + # Get cached object mappings + object_mappings = self._get_object_mappings() + world_model = self.motion_gen.world_coll_checker.world_model + + object_path = object_mappings.get(object_name) + if not object_path: + if self.debug: + print(f"Object {object_name} not found in world model") + return None + + # Search for object in world model + for obj_list, obj_type in [ + (world_model.mesh, "mesh"), + (world_model.cuboid, "cuboid"), + ]: + if not obj_list: + continue + + for obj in obj_list: + if obj.name and object_path in str(obj.name): + if obj.pose is not None: + return Pose.from_list(obj.pose, tensor_args=self.tensor_args) + + if self.debug: + print(f"Object {object_name} found in mappings but pose not available") + return None + + def get_attached_pose(self, link_name: str, joint_state: JointState | None = None) -> Pose: + """Calculate pose of specified link using forward kinematics. + + Computes the world pose of any robot link at the given joint configuration. + This is essential for attachment calculations where we need to know the exact + pose of the parent link to compute relative object positions. + + Args: + link_name: Name of the robot link to get pose for + joint_state: Joint configuration to use for calculation, uses current state if None + + Returns: + World pose of the specified link in cuRobo coordinate frame + """ + if joint_state is None: + joint_state = self._get_current_joint_state_for_curobo() + + # Get all link states using the robot model + link_state = self.motion_gen.kinematics.get_state( + q=joint_state.position.detach().clone().to(device=self.tensor_args.device, dtype=self.tensor_args.dtype), + calculate_jacobian=False, + ) + + # Extract all link poses + link_poses = {} + if link_state.links_position is not None and link_state.links_quaternion is not None: + for i, link in enumerate(link_state.link_names): + link_poses[link] = Pose( + position=link_state.links_position[..., i, :], + quaternion=link_state.links_quaternion[..., i, :], + name=link, + ) + + # For attached object link, use ee_link from robot config as parent + if link_name == self.config.attached_object_link_name: + ee_link = self.config.ee_link_name or self.robot_cfg["kinematics"]["ee_link"] + if ee_link in link_poses: + if self.debug: + print(f"DEBUG GET_ATTACHED_POSE: Using {ee_link} for {link_name}") + return link_poses[ee_link] + else: + if self.debug: + print(f"WARNING: {ee_link} not found, using EE pose") + return self.get_ee_pose(joint_state) + + # Return directly for other links + if link_name in link_poses: + return link_poses[link_name] + else: + if self.debug: + print(f"WARNING: Link {link_name} not found, using EE pose") + return self.get_ee_pose(joint_state) + + def create_attachment( + self, object_name: str, link_name: str | None = None, joint_state: JointState | None = None + ) -> Attachment: + """Create attachment relationship between object and robot link. + + Computes the relative pose between an object and a robot link to enable the robot + to carry the object consistently during motion planning. The attachment stores the transform + from the parent link frame to the object frame, which remains constant while grasped. 
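+
+        Example (illustrative; attach "cube_2" to the default attachment link):
+
+            >>> attachment = planner.create_attachment("cube_2")
+            >>> # attachment.pose holds the link-to-object transform and
+            >>> # attachment.parent the configured attachment link name.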
+ + Args: + object_name: Name of the object to attach + link_name: Parent link for attachment, uses default attached_object_link if None + joint_state: Robot configuration for calculation, uses current state if None + + Returns: + Attachment object containing relative pose and parent link information + """ + if link_name is None: + link_name = self.attached_link + if joint_state is None: + joint_state = self._get_current_joint_state_for_curobo() + + # Get current link pose + link_pose = self.get_attached_pose(link_name, joint_state) + print(f"Getting object pose for {object_name}") + obj_pose = self.get_object_pose(object_name) + + # Compute relative pose + attach_pose = link_pose.inverse().multiply(obj_pose) + + if self.debug: + print(f"Creating attachment for {object_name} to {link_name}") + print(f"Link pose: {link_pose.position}") + print(f"Object pose (ACTUAL): {obj_pose.position}") + print(f"Computed relative pose: {attach_pose.position}") + + return Attachment(attach_pose, link_name) + + def update_world(self) -> None: + """Synchronize collision world with current Isaac Lab scene state. + + Updates all dynamic object poses in cuRobo's collision world to match their current + positions in Isaac Lab. This ensures collision checking uses accurate object positions + after simulation steps, resets, or manual object movements. Static world geometry + is loaded once during initialization and not updated here for performance. + + The method validates that the set of objects hasn't changed at runtime, as cuRobo + requires world model reinitialization when objects are added or removed. + """ + + # Establish validation baseline on first call, validate on subsequent calls + if self._expected_objects is None: + self._expected_objects = set(self._get_world_object_names()) + if self.debug: + print(f"Established object validation baseline: {len(self._expected_objects)} objects") + else: + # Subsequent calls: validate no changes + current_objects = set(self._get_world_object_names()) + if current_objects != self._expected_objects: + added = current_objects - self._expected_objects + removed = self._expected_objects - current_objects + + error_msg = "World objects changed at runtime!\n" + if added: + error_msg += f"Added: {added}\n" + if removed: + error_msg += f"Removed: {removed}\n" + error_msg += "cuRobo world model must be reinitialized." + + # Invalidate cached mappings since object set changed + self._cached_object_mappings = None + + raise RuntimeError(error_msg) + + # Sync object poses with Isaac Lab + self._sync_object_poses_with_isaaclab() + + if self.visualize_spheres: + self._update_sphere_visualization(force_update=True) + + def _get_world_object_names(self) -> list[str]: + """Extract all object names from cuRobo's collision world model. + + Iterates through all supported primitive types (mesh, cuboid, sphere, etc.) in the + collision world and collects their names. This is used for world validation to detect + when objects are added or removed at runtime. 
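+
+        Example (illustrative; this helper backs the runtime validation in
+        update_world()):
+
+            >>> names = self._get_world_object_names()
+            >>> # After the first update_world() call, set(names) should equal
+            >>> # self._expected_objects unless objects were added or removed.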
+ + Returns: + List of all object names currently in the collision world model + """ + try: + world_model = self.motion_gen.world_coll_checker.world_model + + # Handle case where world_model might be a list + if isinstance(world_model, list): + if len(world_model) > self.env_id: + world_model = world_model[self.env_id] + else: + return [] + + object_names = [] + + # Get all primitive object names using the defined primitive types + for primitive_type in self.primitive_types: + if hasattr(world_model, primitive_type) and getattr(world_model, primitive_type): + primitive_list = getattr(world_model, primitive_type) + for primitive in primitive_list: + if primitive.name: + object_names.append(str(primitive.name)) + + return object_names + + except Exception as e: + if self.debug: + print(f"ERROR getting world object names: {e}") + return [] + + def _sync_object_poses_with_isaaclab(self) -> None: + """Synchronize cuRobo collision world with Isaac Lab object positions. + + Updates all dynamic object poses in cuRobo's world model to match their current + positions in Isaac Lab. This ensures accurate collision checking after simulation + steps or manual object movements. Static objects (bins, tables, walls) are skipped + for performance as they shouldn't move during simulation. + + The method updates both the world model and the collision checker to ensure + consistency across all cuRobo components. + """ + try: + # Get cached object mappings and world model + object_mappings = self._get_object_mappings() + world_model = self.motion_gen.world_coll_checker.world_model + rigid_objects = self.env.scene.rigid_objects + + updated_count = 0 + + for object_name, object_path in object_mappings.items(): + if object_name not in rigid_objects: + continue + + # Skip static mesh objects - they should not be dynamically updated + if any(static_name in object_name.lower() for static_name in ["bin", "table", "wall", "floor"]): + if self.debug: + print(f"SYNC: Skipping static object {object_name}") + continue + + # Get current pose from Lab (may be on CPU or CUDA depending on --device flag) + obj = rigid_objects[object_name] + env_origin = self.env.scene.env_origins[self.env_id] + current_pos_raw = obj.data.root_pos_w[self.env_id] - env_origin + current_quat_raw = obj.data.root_quat_w[self.env_id] # (w, x, y, z) + + # Convert to cuRobo device and extract float values for pose list + current_pos = self._to_curobo_device(current_pos_raw) + current_quat = self._to_curobo_device(current_quat_raw) + + # Convert to cuRobo pose format [x, y, z, w, x, y, z] + pose_list = [ + float(current_pos[0].item()), + float(current_pos[1].item()), + float(current_pos[2].item()), + float(current_quat[0].item()), + float(current_quat[1].item()), + float(current_quat[2].item()), + float(current_quat[3].item()), + ] + + # Update object pose in cuRobo's world model + if self._update_object_in_world_model(world_model, object_name, object_path, pose_list): + updated_count += 1 + + if self.debug: + print(f"SYNC: Updated {updated_count} object poses in cuRobo world model") + + # Sync object poses with collision checker + if updated_count > 0: + try: + # Update individual obstacle poses in collision checker + # This preserves static mesh objects unlike load_collision_model which rebuilds everything + for object_name, object_path in object_mappings.items(): + if object_name not in rigid_objects: + continue + + # Skip static mesh objects - they should not be dynamically updated + if any(static_name in object_name.lower() for static_name in 
["bin", "table", "wall", "floor"]): + continue + + # Get current pose and update in collision checker + obj = rigid_objects[object_name] + env_origin = self.env.scene.env_origins[self.env_id] + current_pos_raw = obj.data.root_pos_w[self.env_id] - env_origin + current_quat_raw = obj.data.root_quat_w[self.env_id] + + current_pos = self._to_curobo_device(current_pos_raw) + current_quat = self._to_curobo_device(current_quat_raw) + + # Create cuRobo pose and update collision checker directly + curobo_pose = Pose(position=current_pos, quaternion=current_quat) + self.motion_gen.world_coll_checker.update_obstacle_pose( # type: ignore + object_path, curobo_pose, env_idx=self.env_id, update_cpu_reference=True + ) + + if self.debug: + print(f"Updated {updated_count} object poses in collision checker") + + except Exception as e: + if self.debug: + print(f"ERROR updating collision checker poses: {e}") + + except Exception as e: + if self.debug: + print(f"ERROR in pose synchronization: {e}") + + def _get_object_mappings(self) -> dict[str, str]: + """Get object mappings with caching for performance optimization. + + Returns cached mappings if available, otherwise computes and caches them. + Cache is invalidated when the object set changes. + + Returns: + Dictionary mapping Isaac Lab object names to their corresponding USD paths + """ + if self._cached_object_mappings is None: + world_model = self.motion_gen.world_coll_checker.world_model + rigid_objects = self.env.scene.rigid_objects + self._cached_object_mappings = self._discover_object_mappings(world_model, rigid_objects) + if self.debug: + print(f"Computed and cached object mappings: {len(self._cached_object_mappings)} objects") + + return self._cached_object_mappings + + def _discover_object_mappings(self, world_model, rigid_objects) -> dict[str, str]: + """Build mapping between Isaac Lab object names and cuRobo world paths. + + Automatically discovers the correspondence between Isaac Lab's rigid object names + and their full USD paths in cuRobo's world model. This mapping is essential for + pose synchronization and attachment operations, as cuRobo uses full USD paths + while Isaac Lab uses short object names. + + Args: + world_model: cuRobo's collision world model containing primitive objects + rigid_objects: Isaac Lab's rigid objects dictionary + + Returns: + Dictionary mapping Isaac Lab object names to their corresponding USD paths + """ + mappings = {} + env_prefix = f"/World/envs/env_{self.env_id}/" + world_object_paths = [] + + # Collect all primitive objects from cuRobo world model + for primitive_type in self.primitive_types: + primitive_list = getattr(world_model, primitive_type) + for primitive in primitive_list: + if primitive.name and env_prefix in str(primitive.name): + world_object_paths.append(str(primitive.name)) + + # Match Isaac Lab object names to world paths + for object_name in rigid_objects.keys(): + matched_path = None + # Direct name matching + for path in world_object_paths: + if object_name.lower().replace("_", "") in path.lower().replace("_", ""): + matched_path = path + break + + if matched_path: + mappings[object_name] = matched_path + if self.debug: + print(f"MAPPING: {object_name} -> {matched_path}") + else: + if self.debug: + print(f"WARNING: Could not find world path for {object_name}") + + return mappings + + def _update_object_in_world_model( + self, world_model, object_name: str, object_path: str, pose_list: list[float] + ) -> bool: + """Update a single object's pose in cuRobo's collision world model. 
+ + Searches through all primitive types in the world model to find the specified object + and updates its pose. Uses flexible matching to handle variations in path naming + between Isaac Lab and cuRobo representations. + + Args: + world_model: cuRobo's collision world model + object_name: Short object name from Isaac Lab (e.g., "cube_1") + object_path: Full USD path for the object in cuRobo world + pose_list: New pose as [x, y, z, w, x, y, z] list in cuRobo format + + Returns: + True if object was found and successfully updated, False otherwise + """ + # Handle case where world_model might be a list + if isinstance(world_model, list): + if len(world_model) > self.env_id: + world_model = world_model[self.env_id] + else: + return False + + # Update all primitive types + for primitive_type in self.primitive_types: + primitive_list = getattr(world_model, primitive_type) + for primitive in primitive_list: + if primitive.name: + primitive_name = str(primitive.name) + # Use bidirectional matching for robust path matching + if object_path == primitive_name or object_path in primitive_name or primitive_name in object_path: + primitive.pose = pose_list + if self.debug: + print(f"Updated {primitive_type} {object_name} pose") + return True + + if self.debug: + print(f"WARNING: Object {object_name} not found in world model") + return False + + def _attach_object(self, object_name: str, object_path: str, env_id: int) -> bool: + """Attach an object to the robot for manipulation planning. + + Establishes an attachment between the specified object and the robot's end-effector + or configured attachment link. This enables the robot to carry the object during + motion planning while maintaining proper collision checking. The object's collision + geometry is disabled in the world model since it's now part of the robot. 
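+
+        Example (illustrative; the USD path below is hypothetical):
+
+            >>> ok = planner._attach_object("cube_2", "/World/envs/env_0/Cube_2", env_id=0)
+            >>> # On success, "cube_2" appears in planner.get_attached_objects().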
+ + Args: + object_name: Short name of the object to attach (e.g., "cube_2") + object_path: Full USD path for the object in cuRobo world model + env_id: Environment ID for multi-environment support + + Returns: + True if attachment succeeded, False if attachment failed + """ + try: + current_joint_state = self._get_current_joint_state_for_curobo() + + if self.debug: + print(f"DEBUG ATTACH: Attaching {object_name} at path {object_path}") + + # Create attachment record (relative pose object-frame to parent link) + attachment = self.create_attachment( + object_name, + self.config.attached_object_link_name, + current_joint_state, + ) + self.attached_objects[object_name] = attachment + success = self.motion_gen.attach_objects_to_robot( + joint_state=current_joint_state, + object_names=[object_path], + link_name=self.config.attached_object_link_name, + surface_sphere_radius=self.config.surface_sphere_radius, + sphere_fit_type=SphereFitType.SAMPLE_SURFACE, + world_objects_pose_offset=None, + ) + + if success: + if self.debug: + print(f"DEBUG ATTACH: Successfully attached {object_name}") + print(f"DEBUG ATTACH: Current attached objects: {list(self.attached_objects.keys())}") + + # Force sphere visualization update + if self.visualize_spheres: + self._update_sphere_visualization(force_update=True) + + print("Sphere count after attach is successful: ", self._count_active_spheres()) + + # Deactivate the original obstacle as it's now carried by the robot + self.motion_gen.world_coll_checker.enable_obstacle(object_path, enable=False, env_idx=self.env_id) + + return True + else: + print(f"ERROR: cuRobo attach_objects_to_robot failed for {object_name}") + # Clean up on failure + if object_name in self.attached_objects: + del self.attached_objects[object_name] + return False + + except Exception as e: + print(f"ERROR: Failed to attach objects: {e}") + return False + + def _detach_objects(self, link_names: set[str] | None = None) -> bool: + """Detach objects from robot and restore collision checking. + + Removes object attachments from specified links and re-enables collision checking + for both the objects and the parent links. This is necessary when placing objects + or changing grasps. All attached objects are detached if no specific links are provided. 
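+
+        Example (illustrative; detach everything, e.g. after placing an object):
+
+            >>> planner._detach_objects()  # all attachment links by default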
+ + Args: + link_names: Set of parent link names to detach objects from, detaches all if None + + Returns: + True if detachment operations completed successfully, False otherwise + """ + if link_names is None: + link_names = self.attachment_links + + if self.debug: + print(f"DEBUG DETACH: Detaching objects from links: {link_names}") + print(f"DEBUG DETACH: Current attached objects: {list(self.attached_objects.keys())}") + + # Get cached object mappings to find the USD path for re-enabling + object_mappings = self._get_object_mappings() + + detached_info = [] + detached_links = set() + for object_name, attachment in list(self.attached_objects.items()): + if attachment.parent not in link_names: + continue + + # Find object path and re-enable it in the world + object_path = object_mappings.get(object_name) + if object_path: + try: + self.motion_gen.world_coll_checker.enable_obstacle(object_path, enable=True, env_idx=self.env_id) + if self.debug: + print(f"DEBUG DETACH: Re-enabled obstacle {object_path}") + except Exception as e: + if self.debug: + print(f"ERROR re-enabling obstacle {object_path}: {e}") + + # Collect the link that will need re-enabling + detached_links.add(attachment.parent) + + # Remove from attached objects and log info + del self.attached_objects[object_name] + detached_info.append((object_name, attachment.parent)) + + if self.debug and detached_info: + for obj_name, parent_link in detached_info: + print(f"DEBUG DETACH: Detached {obj_name} from {parent_link}") + + # Re-enable collision checking for the attachment links (following the planning pattern) + if detached_links: + self._set_active_links(list(detached_links), active=True) + if self.debug: + print(f"DEBUG DETACH: Re-enabled collision for attachment links: {detached_links}") + + # Call cuRobo's detach for each link + for link_name in link_names: + try: + self.motion_gen.detach_object_from_robot(link_name=link_name) + if self.debug: + print(f"DEBUG DETACH: Called cuRobo detach for link {link_name}") + except Exception as e: + if self.debug: + print(f"DEBUG DETACH: cuRobo detach failed for {link_name}: {e}") + + return True + + def get_attached_objects(self) -> list[str]: + """Get list of currently attached object names. + + Returns the short names of all objects currently attached to the robot. + These names correspond to Isaac Lab scene object names, not full USD paths. + + Returns: + List of attached object names (e.g., ["cube_1", "cube_2"]) + """ + # With cumotion.py pattern, we store objects in self.attached_objects dict + return list(self.attached_objects.keys()) + + def has_attached_objects(self) -> bool: + """Check if any objects are currently attached to the robot. + + Useful for determining gripper state and collision checking configuration + before planning motions. + + Returns: + True if one or more objects are attached, False if no attachments exist + """ + # With cumotion.py pattern, check if attached_objects dict is non-empty + return len(self.attached_objects) > 0 + + # ===================================================================================== + # JOINT STATE AND KINEMATICS + # ===================================================================================== + + def _get_current_joint_state_for_curobo(self) -> JointState: + """ + Construct the current joint state for cuRobo with zero velocity and acceleration. + + This helper reads the robot's joint positions from Isaac Lab for the current environment + and pairs them with zero velocities and accelerations as required by cuRobo planning. 
+ All tensors are moved to the cuRobo device and reordered to match the kinematic chain + used by the cuRobo motion generator. + + Returns: + JointState on the cuRobo device, ordered according to + `self.motion_gen.kinematics.joint_names`, with position from the robot + and zero velocity/acceleration. + """ + # Fetch joint position (shape: [1, num_joints]) + joint_pos_raw: torch.Tensor = self.robot.data.joint_pos[self.env_id, :].unsqueeze(0) + joint_vel_raw: torch.Tensor = torch.zeros_like(joint_pos_raw) + joint_acc_raw: torch.Tensor = torch.zeros_like(joint_pos_raw) + + # Move to cuRobo device + joint_pos: torch.Tensor = self._to_curobo_device(joint_pos_raw) + joint_vel: torch.Tensor = self._to_curobo_device(joint_vel_raw) + joint_acc: torch.Tensor = self._to_curobo_device(joint_acc_raw) + + cu_js: JointState = JointState( + position=joint_pos, + velocity=joint_vel, + acceleration=joint_acc, + joint_names=self.robot.data.joint_names, + tensor_args=self.tensor_args, + ) + return cu_js.get_ordered_joint_state(self.motion_gen.kinematics.joint_names) + + def get_ee_pose(self, joint_state: JointState) -> Pose: + """Compute end-effector pose from joint configuration. + + Uses cuRobo's forward kinematics to calculate the end-effector pose + at the specified joint configuration. Handles device conversion to ensure + compatibility with cuRobo's CUDA-based computations. + + Args: + joint_state: Robot joint configuration to compute end-effector pose from + + Returns: + End-effector pose in world coordinates + """ + # Ensure joint state is on CUDA device for cuRobo + if isinstance(joint_state.position, torch.Tensor): + cuda_position = self._to_curobo_device(joint_state.position) + else: + cuda_position = self._to_curobo_device(torch.tensor(joint_state.position)) + + # Create new joint state with CUDA tensors + cuda_joint_state = JointState( + position=cuda_position, + velocity=( + self._to_curobo_device(joint_state.velocity.detach().clone()) + if joint_state.velocity is not None + else cuda_position * 0.0 + ), + acceleration=( + self._to_curobo_device(joint_state.acceleration.detach().clone()) + if joint_state.acceleration is not None + else cuda_position * 0.0 + ), + joint_names=joint_state.joint_names, + tensor_args=self.tensor_args, + ) + + kin_state: Any = self.motion_gen.rollout_fn.compute_kinematics(cuda_joint_state) + return kin_state.ee_pose + + # ===================================================================================== + # PLANNING CORE METHODS + # ===================================================================================== + + def _set_active_links(self, links: list[str], active: bool) -> None: + """Configure collision checking for specific robot links. + + Enables or disables collision sphere checking for the specified links. + This is essential for contact scenarios where certain links (like fingers + or attachment points) need collision checking disabled to allow contact + with objects being grasped. + + Args: + links: List of link names to enable or disable collision checking for + active: True to enable collision checking, False to disable + """ + for link in links: + if active: + self.motion_gen.kinematics.kinematics_config.enable_link_spheres(link) + else: + self.motion_gen.kinematics.kinematics_config.disable_link_spheres(link) + + def plan_motion( + self, + target_pose: torch.Tensor, + step_size: float | None = None, + enable_retiming: bool | None = None, + ) -> bool: + """Plan collision-free motion to target pose. 
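+
+        Example (illustrative sketch; assumes ``planner`` is a configured instance of
+        this class and ``goal`` is a 4x4 pose matrix):
+
+            if planner.plan_motion(goal, step_size=0.05):
+                waypoint_poses = planner.get_planned_poses()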
+ + Plans a trajectory from the current robot configuration to the specified target pose. + The method assumes that world updates and locked joint configurations have already + been handled. Supports optional linear retiming for consistent execution speeds. + + Args: + target_pose: Target end-effector pose as 4x4 transformation matrix + step_size: Step size for linear retiming, enables retiming if provided + enable_retiming: Whether to enable linear retiming, auto-detected from step_size if None + + Returns: + True if planning succeeded and a valid trajectory was found, False otherwise + """ + if enable_retiming is None: + enable_retiming = step_size is not None + + # Ensure target pose is on cuRobo device (CUDA) for device isolation + target_pose_cuda = self._to_curobo_device(target_pose) + + target_pos: torch.Tensor + target_rot: torch.Tensor + target_pos, target_rot = PoseUtils.unmake_pose(target_pose_cuda) + target_curobo_pose: Pose = Pose( + position=target_pos, + quaternion=PoseUtils.quat_from_matrix(target_rot), + normalize_rotation=False, + ) + + start_state: JointState = self._get_current_joint_state_for_curobo() + + if self.debug: + print(f"Retiming enabled: {enable_retiming}, Step size: {step_size}") + + success: bool = self._plan_to_contact( + start_state=start_state, + goal_pose=target_curobo_pose, + retreat_distance=self.config.retreat_distance, + approach_distance=self.config.approach_distance, + retime_plan=enable_retiming, + step_size=step_size, + contact=False, + ) + + # Visualize plan if enabled + if success and self.visualize_plan and self._current_plan is not None: + # Get current spheres for visualization + self._sync_object_poses_with_isaaclab() + cu_js = self._get_current_joint_state_for_curobo() + sphere_list = self.motion_gen.kinematics.get_robot_as_spheres(cu_js.position)[0] + + # Split spheres into robot and attached object spheres + robot_spheres = [] + attached_spheres = [] + robot_link_count = 0 + + # Count robot link spheres + robot_links = [ + link + for link in self.robot_cfg["kinematics"]["collision_link_names"] + if link != self.config.attached_object_link_name + ] + for link_name in robot_links: + if hasattr(self.motion_gen.kinematics.kinematics_config, "get_link_spheres"): + link_spheres = self.motion_gen.kinematics.kinematics_config.get_link_spheres(link_name) + if link_spheres is not None: + robot_link_count += int(torch.sum(link_spheres[:, 3] > 0).item()) + + # Split spheres + for i, sphere in enumerate(sphere_list): + if i < robot_link_count: + robot_spheres.append(sphere) + else: + attached_spheres.append(sphere) + + # Compute end-effector positions for visualization + ee_positions_list = [] + finger_positions_list = [] + try: + for i in range(len(self._current_plan.position)): + js: JointState = self._current_plan[i] + kin = self.motion_gen.compute_kinematics(js) + ee_pos = kin.ee_position if hasattr(kin, "ee_position") else kin.ee_pose.position + ee_positions_list.append(ee_pos.cpu().numpy().squeeze()) + + # Path of one finger link for collision-checking visual + q = js.position + if isinstance(q, torch.Tensor): + if q.dim() == 1: + q = q.unsqueeze(0) + link_state = self.motion_gen.kinematics.get_state(q) + names = link_state.link_names + # Use first hand link name from config for finger visualization + finger_link_name = self.config.hand_link_names[0] if self.config.hand_link_names else None + if finger_link_name and finger_link_name in names: + idx = names.index(finger_link_name) + lf = link_state.links_position[0, idx, :].cpu().numpy() + 
finger_positions_list.append(lf) + + if self.debug and len(ee_positions_list) > 0: + print("Link names from kinematics:", kin.link_names) + + except Exception as e: + if self.debug: + print(f"Failed to compute EE positions for visualization: {e}") + ee_positions_list = None + finger_positions_list = None + + try: + if self.counter == 0: + import time + + time.sleep(5) + world_scene = WorldConfig.get_scene_graph(self.motion_gen.world_coll_checker.world_model) + self.counter += 1 + except Exception: + world_scene = None + + # Visualize plan + self.plan_visualizer.visualize_plan( + plan=self._current_plan, + target_pose=target_pose, + robot_spheres=robot_spheres, + attached_spheres=attached_spheres, + ee_positions=np.array(ee_positions_list) if ee_positions_list else None, + finger_positions=np.array(finger_positions_list) if finger_positions_list else None, + frame_duration=0.1, + world_scene=world_scene, + ) + + # Animate EE positions over the timeline for playback + if ee_positions_list: + self.plan_visualizer.animate_plan(np.array(ee_positions_list)) + + # Animate spheres along the path for collision visualization + self.plan_visualizer.animate_spheres_along_path( + plan=self._current_plan, + robot_spheres_at_start=robot_spheres, + attached_spheres_at_start=attached_spheres, + timeline="sphere_animation", + frame_duration=0.1, + interpolation_steps=15, # More steps for smoother animation + ) + + return success + + def _plan_to_contact_pose( + self, + start_state: JointState, + goal_pose: Pose, + contact: bool = True, + retime_plan: bool = False, + step_size: float | None = None, + ) -> bool: + """Plan motion with configurable collision checking for contact scenarios. + + Plans a trajectory while optionally disabling collision checking for hand links and + attached objects. This is crucial for grasping and placing operations where contact + is expected and collision checking would prevent successful planning. 
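+
+        Note that collision spheres disabled for contact planning are restored after the
+        attempt, whether or not a valid trajectory is found.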
+ + Args: + start_state: Starting joint configuration for planning + goal_pose: Target pose to reach in cuRobo coordinate frame + contact: True to disable hand/attached object collisions for contact planning + retime_plan: Whether to apply linear retiming to the resulting trajectory + step_size: Step size for retiming if retime_plan is True + + Returns: + True if planning succeeded, False if no valid trajectory found + """ + # Use configured hand link names instead of hardcoded ones + disable_link_names: list[str] = self.config.hand_link_names.copy() + link_spheres: dict[str, torch.Tensor] = {} + + # Count spheres before planning + sphere_counts_before = self._count_active_spheres() + if self.debug: + print( + f"Planning phase contact={contact}: Spheres before - Total: {sphere_counts_before['total']}, Robot:" + f" {sphere_counts_before['robot_links']}, Attached: {sphere_counts_before['attached_objects']}" + ) + + if contact: + # Store current spheres for the attached link so we can restore later + attached_links: list[str] = list(self.attachment_links) + for attached_link in attached_links: + link_spheres[attached_link] = self.motion_gen.kinematics.kinematics_config.get_link_spheres( + attached_link + ).clone() + + if self.debug: + print("Attached link: ", attached_links) + # Disable all specified links for contact planning + if self.debug: + print("Disable link names: ", disable_link_names) + self._set_active_links(disable_link_names + attached_links, active=False) + else: + if self.debug: + print("Disable link names: ", disable_link_names) + + # Count spheres after link disabling + sphere_counts_after_disable = self._count_active_spheres() + if self.debug: + print( + f"Planning phase contact={contact}: Spheres after disable - Total:" + f" {sphere_counts_after_disable['total']}, Robot: {sphere_counts_after_disable['robot_links']}," + f" Attached: {sphere_counts_after_disable['attached_objects']}" + ) + + planning_success = False + try: + result: Any = self.motion_gen.plan_single(start_state, goal_pose, self.plan_config) + + if result.success.item(): + if result.optimized_plan is not None and len(result.optimized_plan.position) > 0: + self._current_plan = result.optimized_plan + if self.debug: + print(f"Using optimized plan with {len(self._current_plan.position)} waypoints") + else: + self._current_plan = result.get_interpolated_plan() + if self.debug: + print(f"Using interpolated plan with {len(self._current_plan.position)} waypoints") + + self._current_plan = self.motion_gen.get_full_js(self._current_plan) + common_js_names: list[str] = [ + x for x in self.robot.data.joint_names if x in self._current_plan.joint_names + ] + self._current_plan = self._current_plan.get_ordered_joint_state(common_js_names) + self._plan_index = 0 + + if retime_plan and step_size is not None: + original_length: int = len(self._current_plan.position) + self._current_plan = self._linearly_retime_plan(step_size=step_size, plan=self._current_plan) + if self.debug: + print(f"Retimed plan from {original_length} to {len(self._current_plan.position)} waypoints") + + planning_success = True + if self.debug: + print(f"Contact planning succeeded with {len(self._current_plan.position)} waypoints") + else: + planning_success = False + if self.debug: + print(f"Contact planning failed: {result.status}") + + except Exception as e: + if self.debug: + print(f"Error during planning: {e}") + planning_success = False + + # Always restore sphere state after planning, regardless of success + if contact: + 
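# Re-enable the hand links disabled for contact planning and restore the cached attached-object spheres
+            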
self._set_active_links(disable_link_names, active=True) + for attached_link, spheres in link_spheres.items(): + self.motion_gen.kinematics.kinematics_config.update_link_spheres(attached_link, spheres) + return planning_success + + def _plan_to_contact( + self, + start_state: JointState, + goal_pose: Pose, + retreat_distance: float, + approach_distance: float, + contact: bool = False, + retime_plan: bool = False, + step_size: float | None = None, + ) -> bool: + """Execute multi-phase contact planning with approach and retreat phases. + + Implements a planning strategy for manipulation tasks that require approach and contact handling. + Plans multiple trajectory segments with different collision checking configurations. + + Args: + start_state: Starting joint state for planning + goal_pose: Target pose to reach + retreat_distance: Distance to retreat before transition to contact + approach_distance: Distance to approach before final pose + contact: Whether to enable contact planning mode + retime_plan: Whether to retime the resulting plan + step_size: Step size for retiming (only used if retime_plan is True) + + Returns: + True if all planning phases succeeded, False if any phase failed + """ + if self.debug: + print(f"Multi-phase planning: retreat={retreat_distance}, approach={approach_distance}") + + target_poses: list[Pose] = [] + contacts: list[bool] = [] + + if retreat_distance is not None and retreat_distance > 0: + ee_pose: Pose = self.get_ee_pose(start_state) + retreat_pose: Pose = ee_pose.multiply( + Pose( + position=torch.tensor( + [0.0, 0.0, -retreat_distance], + device=self.tensor_args.device, + dtype=self.tensor_args.dtype, + ), + quaternion=torch.tensor( + [1.0, 0.0, 0.0, 0.0], + device=self.tensor_args.device, + dtype=self.tensor_args.dtype, + ), + ) + ) + target_poses.append(retreat_pose) + contacts.append(True) + contacts.append(contact) + if approach_distance is not None and approach_distance > 0: + approach_pose: Pose = goal_pose.multiply( + Pose( + position=torch.tensor( + [0.0, 0.0, -approach_distance], + device=self.tensor_args.device, + dtype=self.tensor_args.dtype, + ), + quaternion=torch.tensor( + [1.0, 0.0, 0.0, 0.0], + device=self.tensor_args.device, + dtype=self.tensor_args.dtype, + ), + ) + ) + target_poses.append(approach_pose) + contacts.append(True) + + target_poses.append(goal_pose) + + current_state: JointState = start_state + full_plan: JointState | None = None + + for i, (target_pose, contact_flag) in enumerate(zip(target_poses, contacts)): + if self.debug: + print( + f"Planning phase {i + 1} of {len(target_poses)}: contact={contact_flag} (collision" + f" {'disabled' if contact_flag else 'enabled'})" + ) + + success: bool = self._plan_to_contact_pose( + start_state=current_state, + goal_pose=target_pose, + contact=contact_flag, + retime_plan=False, + step_size=None, + ) + + if not success: + if self.debug: + print(f"Phase {i + 1} planning failed") + return False + + if full_plan is None: + full_plan = self._current_plan + else: + full_plan = full_plan.stack(self._current_plan) + + last_waypoint: torch.Tensor = self._current_plan.position[-1] + current_state = JointState( + position=last_waypoint.unsqueeze(0), + velocity=torch.zeros_like(last_waypoint.unsqueeze(0)), + acceleration=torch.zeros_like(last_waypoint.unsqueeze(0)), + joint_names=self._current_plan.joint_names, + ) + current_state = current_state.get_ordered_joint_state(self.motion_gen.kinematics.joint_names) + + self._current_plan = full_plan + self._plan_index = 0 + + if retime_plan and 
step_size is not None: + original_length: int = len(self._current_plan.position) + self._current_plan = self._linearly_retime_plan(step_size=step_size, plan=self._current_plan) + if self.debug: + print(f"Retimed complete plan from {original_length} to {len(self._current_plan.position)} waypoints") + + if self.debug: + print(f"Multi-phase planning succeeded with {len(self._current_plan.position)} total waypoints") + + return True + + def _linearly_retime_plan( + self, + step_size: float = 0.01, + plan: JointState | None = None, + n_repeat: int | None = None, + ) -> JointState | None: + """Apply linear retiming to trajectory for consistent execution speed. + + Resamples the trajectory with uniform spacing between waypoints to ensure + consistent motion speed during execution. + + Args: + step_size: Desired spacing between waypoints in joint space + plan: Trajectory to retime, uses current plan if None + + Returns: + Retimed trajectory with uniform waypoint spacing, or None if plan is invalid + """ + if plan is None: + plan = self._current_plan + + if plan is None or len(plan.position) == 0: + return plan + + path = plan.position + + if len(path) <= 1: + return plan + + deltas = path[1:] - path[:-1] + distances = torch.norm(deltas, dim=-1) + + waypoints = [path[0]] + for i, (distance, waypoint) in enumerate(zip(distances, path[1:])): + if distance > 1e-6: + waypoints.append(waypoint) + + if len(waypoints) <= 1: + return plan + + waypoints = torch.stack(waypoints) + + if len(waypoints) > 1: + deltas = waypoints[1:] - waypoints[:-1] + distances = torch.norm(deltas, dim=-1) + cum_distances = torch.cat([torch.zeros(1, device=distances.device), torch.cumsum(distances, dim=0)]) + else: + cum_distances = torch.zeros(1, device=path.device) + + if len(waypoints) < 2 or cum_distances[-1] < 1e-6: + return plan + + total_distance = cum_distances[-1] + num_steps = int(torch.ceil(total_distance / step_size).item()) + 1 + + # Create linearly spaced distances + sampled_distances = torch.linspace(cum_distances[0], cum_distances[-1], num_steps, device=cum_distances.device) + + # Linear interpolation + indices = torch.searchsorted(cum_distances, sampled_distances) + indices = torch.clamp(indices, 0, len(cum_distances) - 1) + + # Get interpolation weights + weights = (sampled_distances - cum_distances[indices - 1]) / ( + cum_distances[indices] - cum_distances[indices - 1] + ) + weights = weights.unsqueeze(-1) + + # Interpolate waypoints + sampled_waypoints = (1 - weights) * waypoints[indices - 1] + weights * waypoints[indices] + + if self.debug: + print( + f"Retiming: {len(path)} to {len(sampled_waypoints)} waypoints, " + f"Distance: {total_distance:.3f}, Step size: {step_size}" + ) + + retimed_plan = JointState( + position=sampled_waypoints, + velocity=torch.zeros( + (len(sampled_waypoints), plan.velocity.shape[-1]), + device=plan.velocity.device, + dtype=plan.velocity.dtype, + ), + acceleration=torch.zeros( + (len(sampled_waypoints), plan.acceleration.shape[-1]), + device=plan.acceleration.device, + dtype=plan.acceleration.dtype, + ), + joint_names=plan.joint_names, + ) + + return retimed_plan + + def has_next_waypoint(self) -> bool: + """Check if more waypoints remain in the current trajectory. + + Returns: + True if there are unprocessed waypoints, False if trajectory is complete or empty + """ + return self._current_plan is not None and self._plan_index < len(self._current_plan.position) + + def get_next_waypoint_ee_pose(self) -> Pose: + """Get end-effector pose for the next waypoint in the trajectory. 
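+
+        Example (illustrative playback sketch; assumes ``planner`` holds a plan from a
+        successful ``plan_motion`` call):
+
+            while planner.has_next_waypoint():
+                ee_pose = planner.get_next_waypoint_ee_pose()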
+ + Advances the trajectory execution index and computes the end-effector pose + for the next waypoint using forward kinematics. + + Returns: + End-effector pose for the next waypoint in world coordinates + + Raises: + IndexError: If no more waypoints remain in the trajectory + """ + if not self.has_next_waypoint(): + raise IndexError("No more waypoints in the plan.") + next_joint_state: JointState = self._current_plan[self._plan_index] + self._plan_index += 1 + eef_state: Any = self.motion_gen.compute_kinematics(next_joint_state) + return eef_state.ee_pose + + def reset_plan(self) -> None: + """Reset trajectory execution state. + + Clears the current trajectory and resets the execution index to zero. + This prepares the planner for a new planning operation. + """ + self._plan_index = 0 + self._current_plan = None + + def get_planned_poses(self) -> list[torch.Tensor]: + """Extract all end-effector poses from current trajectory. + + Computes end-effector poses for all waypoints in the current trajectory without + affecting the execution state. Optionally repeats the final pose multiple times + if configured for stable goal reaching. + + Returns: + List of end-effector poses as 4x4 transformation matrices, with optional repetition + """ + if self._current_plan is None: + return [] + + # Save current execution state + original_plan_index = self._plan_index + + # Iterate through the plan to get all poses + planned_poses: list[torch.Tensor] = [] + self._plan_index = 0 + while self.has_next_waypoint(): + # Directly use the joint state from the plan to compute pose + # without advancing the main plan index in get_next_waypoint_ee_pose + next_joint_state: JointState = self._current_plan[self._plan_index] + self._plan_index += 1 # Manually advance index for this loop + eef_state: Any = self.motion_gen.compute_kinematics(next_joint_state) + planned_pose: Pose | None = eef_state.ee_pose + + if planned_pose is not None: + # Convert pose to environment device for compatibility + position = ( + self._to_env_device(planned_pose.position) + if isinstance(planned_pose.position, torch.Tensor) + else planned_pose.position + ) + rotation = ( + self._to_env_device(planned_pose.get_rotation()) + if isinstance(planned_pose.get_rotation(), torch.Tensor) + else planned_pose.get_rotation() + ) + planned_poses.append(PoseUtils.make_pose(position, rotation)[0]) + + # Restore the original execution state + self._plan_index = original_plan_index + + if self.n_repeat is not None and self.n_repeat > 0 and len(planned_poses) > 0: + print(f"Repeating final pose {self.n_repeat} times") + final_pose: torch.Tensor = planned_poses[-1] + planned_poses.extend([final_pose] * self.n_repeat) + + return planned_poses + + # ===================================================================================== + # VISUALIZATION METHODS + # ===================================================================================== + + def _update_visualization_at_joint_positions(self, joint_positions: torch.Tensor) -> None: + """Update sphere visualization for the robot at specific joint positions. 
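+
+        Updates are throttled to every ``sphere_update_freq`` frames to limit the cost of
+        rewriting sphere prims in the USD stage.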
+ + Args: + joint_positions: Joint configuration to visualize collision spheres at + """ + if not self.visualize_spheres: + return + + self.frame_counter += 1 + if self.frame_counter % self.sphere_update_freq != 0: + return + + original_joints: torch.Tensor = self.robot.data.joint_pos[self.env_id].clone() + + try: + # Ensure joint positions are on environment device for robot commands + env_joint_positions = ( + self._to_env_device(joint_positions) if joint_positions.device != self.env.device else joint_positions + ) + self.robot.set_joint_position_target(env_joint_positions.view(1, -1), env_ids=[self.env_id]) + self._update_sphere_visualization(force_update=False) + finally: + self.robot.set_joint_position_target(original_joints.unsqueeze(0), env_ids=[self.env_id]) + + def _update_sphere_visualization(self, force_update: bool = True) -> None: + """Update visual representation of robot collision spheres in USD stage. + + Creates or updates sphere primitives in the USD stage to show the robot's + collision model. Different colors are used for robot links (green) and + attached objects (orange) to help distinguish collision boundaries. + + Args: + force_update: True to recreate all spheres, False to update existing positions only + """ + from isaaclab.sim.spawners.meshes import spawn_mesh_sphere + + # Get current sphere data + cu_js = self._get_current_joint_state_for_curobo() + sphere_position = self._to_curobo_device( + cu_js.position if isinstance(cu_js.position, torch.Tensor) else torch.tensor(cu_js.position) + ) + sphere_list = self.motion_gen.kinematics.get_robot_as_spheres(sphere_position)[0] + robot_link_count = self._get_robot_link_sphere_count() + + # Remove existing spheres if force update or first time + if (self.spheres is None or force_update) and self.spheres is not None: + self._remove_existing_spheres() + + # Initialize sphere list if needed + if self.spheres is None or force_update: + self.spheres = [] + + # Create or update all spheres + for sphere_idx, sphere in enumerate(sphere_list): + if not self._is_valid_sphere(sphere): + continue + + sphere_config = self._create_sphere_config(sphere_idx, sphere, robot_link_count) + prim_path = f"/curobo/robot_sphere_{sphere_idx}" + + # Remove old sphere if updating + if not (self.spheres is None or force_update): + if sphere_idx < len(self.spheres) and self.usd_helper.stage.GetPrimAtPath(prim_path).IsValid(): + self.usd_helper.stage.RemovePrim(prim_path) + + # Spawn sphere + spawn_mesh_sphere(prim_path=prim_path, translation=sphere_config["position"], cfg=sphere_config["cfg"]) + + # Store reference if creating new + if self.spheres is None or force_update or sphere_idx >= len(self.spheres): + self.spheres.append((prim_path, float(sphere.radius))) + + def _get_robot_link_sphere_count(self) -> int: + """Calculate total number of collision spheres for robot links excluding attached objects. + + Iterates through all robot collision links (excluding the attached object link) and + counts the active collision spheres for each link. This count is used to determine + which spheres in the visualization represent robot links vs attached objects. 
+ + Returns: + Total number of active collision spheres for robot links only + """ + sphere_config = self.motion_gen.kinematics.kinematics_config + robot_links = [ + link + for link in self.robot_cfg["kinematics"]["collision_link_names"] + if link != self.config.attached_object_link_name + ] + return sum( + int(torch.sum(sphere_config.get_link_spheres(link_name)[:, 3] > 0).item()) for link_name in robot_links + ) + + def _remove_existing_spheres(self) -> None: + """Remove all existing sphere visualization primitives from the USD stage. + + Iterates through all stored sphere references and removes their corresponding + USD primitives from the stage. This is used during force updates or when + recreating the sphere visualization from scratch. + """ + stage = self.usd_helper.stage + for prim_path, _ in self.spheres: + if stage.GetPrimAtPath(prim_path).IsValid(): + stage.RemovePrim(prim_path) + + def _is_valid_sphere(self, sphere) -> bool: + """Validate sphere data for visualization rendering. + + Checks if a sphere has valid position coordinates (no NaN values) and a positive + radius. Invalid spheres are skipped during visualization to prevent rendering errors. + + Args: + sphere: Sphere object containing position and radius data + + Returns: + True if sphere has valid position and positive radius, False otherwise + """ + pos_tensor = torch.tensor(sphere.position, dtype=torch.float32) + return not torch.isnan(pos_tensor).any() and sphere.radius > 0 + + def _create_sphere_config(self, sphere_idx: int, sphere, robot_link_count: int) -> dict: + """Create sphere configuration with position and visual properties for USD rendering. + + Determines sphere type (robot link vs attached object), calculates world position, + and creates the appropriate visual configuration including colors and materials. + Robot link spheres are green with lower opacity, while attached object spheres + are orange with higher opacity for better distinction. + + Args: + sphere_idx: Index of the sphere in the sphere list + sphere: Sphere object containing position and radius data + robot_link_count: Total number of robot link spheres (for type determination) + + Returns: + Dictionary containing 'position' (world coordinates) and 'cfg' (MeshSphereCfg) + """ + from isaaclab.sim.spawners.materials import PreviewSurfaceCfg + from isaaclab.sim.spawners.meshes import MeshSphereCfg + + is_attached = sphere_idx >= robot_link_count + color = (0.8, 0.2, 0.0) if is_attached else (0.0, 0.8, 0.2) + opacity = 0.9 if is_attached else 0.5 + + # Calculate position + env_origin = self.env.scene.env_origins[self.env_id, :3] + root_translation = (self.robot.data.root_pos_w[self.env_id, :3] - env_origin).detach().cpu().numpy() + position = sphere.position.cpu().numpy() if hasattr(sphere.position, "cpu") else sphere.position + if not is_attached: + position = position + root_translation + + return { + "position": position, + "cfg": MeshSphereCfg( + radius=float(sphere.radius), visual_material=PreviewSurfaceCfg(diffuse_color=color, opacity=opacity) + ), + } + + def _is_sphere_attached_object(self, sphere_index: int, sphere_config: Any) -> bool: + """Check if a sphere belongs to attached_object link. 
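+
+        Sphere lists are ordered with robot-link spheres first, so any index at or beyond
+        the robot-link sphere count is treated as belonging to the attached object.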
+ + Args: + sphere_index: Index of the sphere to check + sphere_config: Sphere configuration object + + Returns: + True if sphere belongs to an attached object, False if it's a robot link sphere + """ + # Get total number of robot link spheres (excluding attached_object) + robot_links = [ + link + for link in self.robot_cfg["kinematics"]["collision_link_names"] + if link != self.config.attached_object_link_name + ] + + total_robot_spheres = 0 + for link_name in robot_links: + try: + link_spheres = sphere_config.get_link_spheres(link_name) + active_spheres = torch.sum(link_spheres[:, 3] > 0).item() + total_robot_spheres += int(active_spheres) + except Exception: + continue + + # If sphere_index >= total_robot_spheres, it's an attached object sphere + is_attached = sphere_index >= total_robot_spheres + + if self.debug and sphere_index < 5: # Debug first few spheres + print(f"DEBUG SPHERE {sphere_index}: total_robot_spheres={total_robot_spheres}, is_attached={is_attached}") + + return is_attached + + # ===================================================================================== + # HIGH-LEVEL PLANNING INTERFACE + # ===================================================================================== + + def update_world_and_plan_motion( + self, + target_pose: torch.Tensor, + expected_attached_object: str | None = None, + env_id: int = 0, + step_size: float | None = None, + enable_retiming: bool | None = None, + ) -> bool: + """Complete planning pipeline with world updates and object attachment handling. + + Provides a high-level interface that handles the complete planning workflow: + world synchronization, object attachment/detachment, gripper configuration, + and motion planning. + + Args: + target_pose: Target end-effector pose as 4x4 transformation matrix + expected_attached_object: Name of object that should be attached, None for no attachment + env_id: Environment ID for multi-environment setups + step_size: Step size for linear retiming if retiming is enabled + enable_retiming: Whether to enable linear retiming of trajectory + + Returns: + True if complete planning pipeline succeeded, False if any step failed + """ + # Always reset the plan before starting a new one to ensure a clean state + self.reset_plan() + + if self.debug: + print("\n=== MOTION PLANNING DEBUG ===") + print(f"Expected attached object: {expected_attached_object}") + + self.update_world() + gripper_closed = expected_attached_object is not None + self._set_gripper_state(gripper_closed) + current_attached = self.get_attached_objects() + + if self.debug: + print(f"Current attached objects: {current_attached}") + + # Attach object if expected but not currently attached + if expected_attached_object and expected_attached_object not in current_attached: + if self.debug: + print(f"Need to attach {expected_attached_object}") + + object_mappings = self._get_object_mappings() + + if self.debug: + print(f"Object mappings found: {list(object_mappings.keys())}") + + if expected_attached_object in object_mappings: + expected_path = object_mappings[expected_attached_object] + + if self.debug: + print(f"Object path: {expected_path}") + + # Debug object poses + rigid_objects = self.env.scene.rigid_objects + if expected_attached_object in rigid_objects: + obj = rigid_objects[expected_attached_object] + origin = self.env.scene.env_origins[env_id] + obj_pos = obj.data.root_pos_w[env_id] - origin + print(f"Isaac Lab object position: {obj_pos}") + + # Debug end-effector position + ee_frame_cfg = SceneEntityCfg("ee_frame") + 
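# Resolve the end-effector frame entity from the scene for the position comparison
+                    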
ee_frame = self.env.scene[ee_frame_cfg.name]
+                    ee_pos = ee_frame.data.target_pos_w[env_id, 0, :] - origin
+                    print(f"End-effector position: {ee_pos}")
+
+                    # Debug distance
+                    distance = torch.linalg.vector_norm(obj_pos - ee_pos).item()
+                    print(f"Distance EE to object: {distance:.4f}")
+
+                    # Debug gripper state
+                    robot = self.env.scene["robot"]
+                    gripper_pos = robot.data.joint_pos[env_id, -2:]
+                    gripper_open_val = self.config.grasp_gripper_open_val
+                    print(f"Gripper positions: {gripper_pos}")
+                    print(f"Gripper open val: {gripper_open_val}")
+
+                    is_grasped = self._check_object_grasped(gripper_pos, expected_attached_object)
+
+                    if self.debug:
+                        print(f"Is grasped check result: {is_grasped}")
+
+                    if is_grasped:
+                        if self.debug:
+                            print(f"Attaching {expected_attached_object}")
+                        self._attach_object(expected_attached_object, expected_path, env_id)
+                        print(f"Attached {expected_attached_object}")
+                    else:
+                        if self.debug:
+                            print("Object not detected as grasped - attachment skipped")
+                            print("This will cause a collision with the ghost object!")
+            else:
+                if self.debug:
+                    print(f"Object {expected_attached_object} not found in world mappings")
+
+        # Detach objects if no object should be attached (i.e., placing/releasing)
+        if expected_attached_object is None and current_attached:
+            if self.debug:
+                print("Detaching all objects as no object is expected to be attached")
+            self._detach_objects()
+
+        if self.debug:
+            print(f"Planning motion with attached objects: {self.get_attached_objects()}")
+
+        plan_success = self.plan_motion(target_pose, step_size, enable_retiming)
+
+        if self.debug:
+            print(f"Planning result: {plan_success}")
+            print("=== END MOTION PLANNING DEBUG ===\n")
+
+        # Detach all objects after planning so the next call starts from a clean attachment state
+        self._detach_objects()
+
+        return plan_success
+
+    # =====================================================================================
+    # UTILITY METHODS
+    # =====================================================================================
+
+    def _check_object_grasped(self, gripper_pos: torch.Tensor, object_name: str) -> bool:
+        """Check if a specific object is currently grasped by the robot.
+
+        Uses the gripper joint position as a heuristic: a reading below the configured
+        open value is treated as a closed gripper holding the object.
+
+        Args:
+            gripper_pos: Gripper position tensor
+            object_name: Name of object to check (e.g., "cube_1")
+
+        Returns:
+            True if object is detected as grasped
+        """
+        gripper_open_val = self.config.grasp_gripper_open_val
+
+        if gripper_pos[0].item() < gripper_open_val:
+            print(f"Object {object_name} is grasped")
+            return True
+        else:
+            print(f"Object {object_name} is not grasped")
+            return False
+
+    def _set_gripper_state(self, has_attached_objects: bool) -> None:
+        """Configure gripper joint positions based on object attachment status.
+
+        Sets the gripper to the closed position when objects are attached and to the open
+        position when no objects are attached. This ensures collision checking and planning
+        use the correct gripper configuration.
+
+        Args:
+            has_attached_objects: True if robot currently has attached objects requiring closed gripper
+        """
+        if has_attached_objects:
+            # Closed gripper for grasping
+            locked_joints = self.config.gripper_closed_positions
+        else:
+            # Open gripper for manipulation
+            locked_joints = self.config.gripper_open_positions
+
+        self.motion_gen.update_locked_joints(locked_joints, self.robot_cfg)
+
+    def _count_active_spheres(self) -> dict[str, int]:
+        """Count active collision spheres by category for debugging.
+
+        Analyzes the current collision sphere configuration to provide detailed
+        statistics about robot links vs attached object spheres. This is helpful
+        for debugging collision checking issues and attachment problems.
+
+        Returns:
+            Dictionary containing sphere counts by category (total, robot_links, attached_objects)
+        """
+        cu_js = self._get_current_joint_state_for_curobo()
+
+        # Ensure position tensor is on CUDA for cuRobo
+        if isinstance(cu_js.position, torch.Tensor):
+            sphere_position = self._to_curobo_device(cu_js.position)
+        else:
+            # Convert list to tensor and move to CUDA
+            sphere_position = self._to_curobo_device(torch.tensor(cu_js.position))
+
+        sphere_list = self.motion_gen.kinematics.get_robot_as_spheres(sphere_position)[0]
+
+        # Get sphere configuration
+        sphere_config = self.motion_gen.kinematics.kinematics_config
+
+        # Count robot link spheres (excluding attached_object)
+        robot_links = [
+            link
+            for link in self.robot_cfg["kinematics"]["collision_link_names"]
+            if link != self.config.attached_object_link_name
+        ]
+        robot_sphere_count = 0
+        for link_name in robot_links:
+            if hasattr(sphere_config, "get_link_spheres"):
+                link_spheres = sphere_config.get_link_spheres(link_name)
+                if link_spheres is not None:
+                    active_spheres = torch.sum(link_spheres[:, 3] > 0).item()
+                    robot_sphere_count += int(active_spheres)
+
+        # Any spheres beyond robot_sphere_count are attached object spheres
+        total_spheres = len(list(sphere_list))
+        attached_sphere_count = max(0, total_spheres - robot_sphere_count)
+
+        if self.debug:
+            print(
+                f"DEBUG SPHERE COUNT: Total={total_spheres}, Robot={robot_sphere_count},"
+                f" Attached={attached_sphere_count}"
+            )
+
+        return {
+            "total": total_spheres,
+            "robot_links": robot_sphere_count,
+            "attached_objects": attached_sphere_count,
+        }
diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py
new file mode 100644
index 00000000000..3b0babbabf0
--- /dev/null
+++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py
@@ -0,0 +1,285 @@
+# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+# +# SPDX-License-Identifier: Apache-2.0 + +from dataclasses import dataclass, field + +from curobo.geom.sdf.world import CollisionCheckerType +from curobo.geom.types import WorldConfig +from curobo.util_file import get_world_configs_path, join_path, load_yaml + + +@dataclass +class CuroboPlannerConfig: + """Configuration for CuRobo motion planner.""" + + # Robot configuration + robot_config_file: str = "franka.yml" + """cuRobo robot configuration file (path defined by curobo api).""" + + robot_name: str = "franka" + """Robot name for visualization and identification.""" + + ee_link_name: str | None = None + """End-effector link name (auto-detected from robot config if None).""" + + # Gripper configuration + gripper_joint_names: list[str] = field(default_factory=lambda: ["panda_finger_joint1", "panda_finger_joint2"]) + """Names of gripper joints.""" + + gripper_open_positions: dict[str, float] = field( + default_factory=lambda: {"panda_finger_joint1": 0.04, "panda_finger_joint2": 0.04} + ) + """Open gripper positions for cuRobo to update spheres""" + + gripper_closed_positions: dict[str, float] = field( + default_factory=lambda: {"panda_finger_joint1": 0.008, "panda_finger_joint2": 0.008} + ) + """Closed gripper positions for cuRobo to update spheres""" + + # Hand link configuration (for contact planning) + hand_link_names: list[str] = field(default_factory=lambda: ["panda_leftfinger", "panda_rightfinger", "panda_hand"]) + """Names of hand/finger links to disable during contact planning.""" + + # Attachment configuration + attached_object_link_name: str = "attached_object" + """Name of the link used for attaching objects.""" + + # World configuration + world_config_file: str = "collision_table.yml" + """CuRobo world configuration file (without path).""" + + # Motion planning parameters + collision_checker_type: CollisionCheckerType = CollisionCheckerType.MESH + """Type of collision checker to use.""" + + num_trajopt_seeds: int = 12 + """Number of seeds for trajectory optimization.""" + + num_graph_seeds: int = 12 + """Number of seeds for graph search.""" + + interpolation_dt: float = 0.05 + """Time step for interpolating waypoints.""" + + collision_cache_size: dict[str, int] = field(default_factory=lambda: {"obb": 150, "mesh": 150}) + """Cache sizes for different collision types.""" + + trajopt_tsteps: int = 32 + """Number of trajectory optimization time steps.""" + + collision_activation_distance: float = 0.0 + """Distance at which collision constraints are activated.""" + + approach_distance: float = 0.05 + """Distance to approach at the end of the plan.""" + + retreat_distance: float = 0.05 + """Distance to retreat at the start of the plan.""" + + grasp_gripper_open_val: float = 0.04 + """Gripper joint value when considered open for grasp detection.""" + + # Planning configuration + enable_graph: bool = True + """Whether to enable graph-based planning.""" + + enable_graph_attempt: int = 5 + """Number of graph planning attempts.""" + + max_planning_attempts: int = 15 + """Maximum number of planning attempts.""" + + enable_finetune_trajopt: bool = True + """Whether to enable trajectory optimization fine-tuning.""" + + time_dilation_factor: float = 1.0 + """Time dilation factor for planning.""" + + surface_sphere_radius: float = 0.005 + """Radius of surface spheres for collision checking.""" + + # Debug and visualization + n_repeat: int | None = None + """Number of times to repeat final waypoint for stabilization. 
If None, no repetition.""" + + motion_step_size: float | None = None + """Step size (in radians) for retiming motion plans. If None, no retiming.""" + + visualize_spheres: bool = False + """Visualize robot collision spheres. Note: only works for env 0.""" + + visualize_plan: bool = False + """Visualize motion plan in Rerun. Note: only works for env 0.""" + + debug_planner: bool = False + """Enable detailed motion planning debug information.""" + + sphere_update_freq: int = 5 + """Frequency to update sphere visualization, specified in number of frames.""" + + motion_noise_scale: float = 0.0 + """Scale of Gaussian noise to add to the planned waypoints. Defaults to 0.0 (no noise).""" + + # Collision sphere configuration + collision_spheres_file: str | None = None + """Collision spheres configuration file (auto-detected if None).""" + + extra_collision_spheres: dict[str, int] = field(default_factory=lambda: {"attached_object": 100}) + """Extra collision spheres for attached objects.""" + + position_threshold: float = 0.005 + """Position threshold for motion planning.""" + + rotation_threshold: float = 0.05 + """Rotation threshold for motion planning.""" + + def get_world_config(self) -> WorldConfig: + """Load and prepare the world configuration. + + This method can be overridden in subclasses or customized per task + to provide different world configuration setups. + + Returns: + WorldConfig: The configured world for collision checking + """ + # Default implementation: just load the world config file + world_cfg = WorldConfig.from_dict(load_yaml(join_path(get_world_configs_path(), self.world_config_file))) + return world_cfg + + def _get_world_config_with_table_adjustment(self) -> WorldConfig: + """Load world config with standard table adjustments. + + This is a helper method that implements the common pattern of adjusting + table height and combining mesh/cuboid worlds. Used by specific task configs. 
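+
+        Example (illustrative; mirrors how the task presets below wire this in):
+
+            config = CuroboPlannerConfig.franka_config()
+            config.get_world_config = lambda: config._get_world_config_with_table_adjustment()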
+ + Returns: + WorldConfig: World configuration with adjusted table + """ + # Load the base world config + world_cfg_table = WorldConfig.from_dict(load_yaml(join_path(get_world_configs_path(), self.world_config_file))) + + # Adjust table height if cuboid exists + if world_cfg_table.cuboid is not None: + if len(world_cfg_table.cuboid) > 0: + world_cfg_table.cuboid[0].pose[2] -= 0.02 + + # Get mesh world for additional collision objects + world_cfg_mesh = WorldConfig.from_dict( + load_yaml(join_path(get_world_configs_path(), self.world_config_file)) + ).get_mesh_world() + + # Adjust mesh configuration if it exists + if world_cfg_mesh.mesh is not None: + if len(world_cfg_mesh.mesh) > 0: + world_cfg_mesh.mesh[0].name += "_mesh" + world_cfg_mesh.mesh[0].pose[2] = -10.5 # Move mesh below scene + + # Combine cuboid and mesh worlds + world_cfg = WorldConfig(cuboid=world_cfg_table.cuboid, mesh=world_cfg_mesh.mesh) + return world_cfg + + @classmethod + def franka_config(cls) -> "CuroboPlannerConfig": + """Create configuration for Franka Panda robot.""" + return cls( + robot_config_file="franka.yml", + robot_name="franka", + gripper_joint_names=["panda_finger_joint1", "panda_finger_joint2"], + gripper_open_positions={"panda_finger_joint1": 0.04, "panda_finger_joint2": 0.04}, + gripper_closed_positions={"panda_finger_joint1": 0.023, "panda_finger_joint2": 0.023}, + hand_link_names=["panda_leftfinger", "panda_rightfinger", "panda_hand"], + collision_spheres_file="spheres/franka_mesh.yml", + grasp_gripper_open_val=0.04, + approach_distance=0.0, + retreat_distance=0.0, + max_planning_attempts=1, + time_dilation_factor=0.6, + enable_finetune_trajopt=False, + n_repeat=None, + motion_step_size=None, + visualize_spheres=False, + visualize_plan=False, + debug_planner=False, + sphere_update_freq=5, + motion_noise_scale=0.02, + ) + + @classmethod + def franka_stack_cube_bin_config(cls) -> "CuroboPlannerConfig": + """Create configuration for Franka stacking cube in a bin.""" + config = cls.franka_config() + config.gripper_closed_positions = {"panda_finger_joint1": 0.024, "panda_finger_joint2": 0.024} + config.grasp_gripper_open_val = 0.04 + config.approach_distance = 0.05 + config.retreat_distance = 0.07 + config.surface_sphere_radius = 0.01 + config.debug_planner = True + config.collision_activation_distance = 0.02 + config.visualize_plan = True + config.enable_finetune_trajopt = True + config.motion_noise_scale = 0.02 + config.get_world_config = lambda: config._get_world_config_with_table_adjustment() + return config + + @classmethod + def franka_stack_square_nut_config(cls) -> "CuroboPlannerConfig": + """Create configuration for Franka stacking a square nut.""" + config = cls.franka_config() + config.gripper_closed_positions = {"panda_finger_joint1": 0.021, "panda_finger_joint2": 0.021} + config.grasp_gripper_open_val = 0.04 + config.approach_distance = 0.11 + config.retreat_distance = 0.11 + config.extra_collision_spheres = {"attached_object": 200} + config.surface_sphere_radius = 0.005 + config.n_repeat = None + config.motion_step_size = None + config.visualize_spheres = False + config.visualize_plan = True + config.debug_planner = True + config.motion_noise_scale = 0.0 + config.time_dilation_factor = 0.4 + config.get_world_config = lambda: config._get_world_config_with_table_adjustment() + return config + + @classmethod + def franka_stack_cube_config(cls) -> "CuroboPlannerConfig": + """Create configuration for Franka stacking a normal cube.""" + config = cls.franka_config() + config.n_repeat = None + 
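# Keep retiming and visualization disabled; enable extra debug output for this task
+        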
config.motion_step_size = None
+        config.visualize_spheres = False
+        config.visualize_plan = False
+        config.debug_planner = True
+        config.motion_noise_scale = 0.0
+        config.collision_activation_distance = 0.01
+        config.approach_distance = 0.05
+        config.retreat_distance = 0.05
+        config.get_world_config = lambda: config._get_world_config_with_table_adjustment()
+        return config
+
+    @classmethod
+    def from_task_name(cls, task_name: str) -> "CuroboPlannerConfig":
+        """Create configuration from task name.
+
+        Args:
+            task_name: Task name (e.g., "Isaac-Stack-Cube-Bin-Franka-v0")
+
+        Returns:
+            CuroboPlannerConfig: Configuration for the specified task
+        """
+        task_lower = task_name.lower()
+
+        if "stack-cube-bin" in task_lower:
+            return cls.franka_stack_cube_bin_config()
+        elif "stack-square-nut" in task_lower:
+            return cls.franka_stack_square_nut_config()
+        elif "stack-cube" in task_lower:
+            return cls.franka_stack_cube_config()
+        else:
+            # Default to Franka configuration
+            print(f"Warning: Unknown task '{task_name}', using default Franka configuration")
+            return cls.franka_config()
diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py
new file mode 100644
index 00000000000..44eb0a94228
--- /dev/null
+++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py
@@ -0,0 +1,968 @@
+# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""Utility for visualizing motion plans using Rerun.
+
+This module provides tools to visualize motion plans, robot poses, and collision spheres
+using Rerun's visualization capabilities. It helps in debugging and validating collision-free paths.
+"""
+
+import atexit
+import numpy as np
+import os
+import signal
+import subprocess
+import threading
+import time
+import torch
+import weakref
+from typing import TYPE_CHECKING, Any, Optional
+
+import rerun as rr
+from curobo.types.state import JointState
+
+import isaaclab.utils.math as PoseUtils
+
+# Import psutil for process management
+try:
+    import psutil
+
+    PSUTIL_AVAILABLE = True
+except ImportError:
+    PSUTIL_AVAILABLE = False
+    print("Warning: psutil not available. 
Process monitoring will be limited.") + +if TYPE_CHECKING: # For type hints only + import trimesh + + +# Global registry to track all PlanVisualizer instances for cleanup +_GLOBAL_PLAN_VISUALIZERS: list["PlanVisualizer"] = [] + + +def _cleanup_all_plan_visualizers(): + """Enhanced global cleanup function with better process killing.""" + global _GLOBAL_PLAN_VISUALIZERS + + if PSUTIL_AVAILABLE: + killed_count = 0 + for proc in psutil.process_iter(["pid", "name", "cmdline"]): + # Check if it's a rerun process + if (proc.info["name"] and "rerun" in proc.info["name"].lower()) or ( + proc.info["cmdline"] and any("rerun" in str(arg).lower() for arg in proc.info["cmdline"]) + ): + proc.kill() + killed_count += 1 + + print(f"Killed {killed_count} Rerun viewer processes on script exit") + else: + # Fallback to pkill + subprocess.run(["pkill", "-f", "rerun"], stderr=subprocess.DEVNULL, check=False) + print("Used pkill fallback to close Rerun processes") + + # Also clean up individual instances + for visualizer in _GLOBAL_PLAN_VISUALIZERS[:]: + if not visualizer._closed: + visualizer.close() + + _GLOBAL_PLAN_VISUALIZERS.clear() + + +# Register global cleanup on module import +atexit.register(_cleanup_all_plan_visualizers) + + +class PlanVisualizer: + """Visualizes motion plans using Rerun. + + This class provides methods to visualize: + 1. Robot poses along a planned trajectory + 2. Attached objects and their collision spheres + 3. Robot collision spheres + 4. Target poses and waypoints + """ + + def __init__( + self, + robot_name: str = "panda", + recording_id: str | None = None, + debug: bool = False, + save_path: str | None = None, + base_translation: np.ndarray | None = None, + ): + """Initialize the plan visualizer. + + Args: + robot_name: Name of the robot for visualization + recording_id: Optional ID for the Rerun recording + debug: Whether to print debug information + save_path: Optional path to save the recording + base_translation: Optional base translation to apply to all visualized entities + """ + self.robot_name = robot_name + self.debug = debug + self.recording_id = recording_id or f"motion_plan_{robot_name}" + self.save_path = save_path + self._closed = False + # Translation offset applied to all visualized entities (for multi-env setups) + self._base_translation = ( + np.array(base_translation, dtype=float) if base_translation is not None else np.zeros(3) + ) + + # Enhanced process management + self._parent_pid = os.getpid() + self._monitor_thread = None + self._monitor_active = False + + # Motion generator reference for sphere animation (set by CuroboPlanner) + self._motion_gen_ref = None + + # Register this instance globally for cleanup + global _GLOBAL_PLAN_VISUALIZERS + _GLOBAL_PLAN_VISUALIZERS.append(self) + + # Initialize Rerun + rr.init(self.recording_id, spawn=False) + + # Spawn viewer and keep handle if provided so we can terminate it later + try: + self._rerun_process = rr.spawn() + except Exception: + # Older versions of Rerun may not return a process handle + self._rerun_process = None + + # Set up coordinate system + rr.log("world", rr.ViewCoordinates.RIGHT_HAND_Y_UP) + + # Store visualization state + self._current_frame = 0 + self._sphere_entities: dict[str, list[str]] = {"robot": [], "attached": [], "target": []} + + # Start enhanced parent process monitoring + self._start_parent_process_monitoring() + + # Use weakref.finalize for cleanup when object is garbage collected + self._finalizer = weakref.finalize( + self, self._cleanup_class_resources, self.recording_id, 
self.save_path, debug + ) + + # Also register atexit handler as backup for normal script termination + # Store values locally to avoid capturing self in the closure + recording_id = self.recording_id + save_path = self.save_path + debug_flag = debug + atexit.register(self._cleanup_class_resources, recording_id, save_path, debug_flag) + + # Store original signal handlers so we can restore them after cleanup + self._original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL) + self._original_sigterm_handler = signal.signal(signal.SIGTERM, signal.SIG_DFL) + + # Handle Ctrl+C (SIGINT) and termination (SIGTERM) signals + def signal_handler(signum, frame): + if self.debug: + print(f"Received signal {signum}, closing Rerun viewer...") + self._cleanup_on_exit() + + # Restore original signal handler and re-raise the signal + if signum == signal.SIGINT: + signal.signal(signal.SIGINT, self._original_sigint_handler) + elif signum == signal.SIGTERM: + signal.signal(signal.SIGTERM, self._original_sigterm_handler) + + # Re-raise the signal so Isaac Lab can handle it normally + import os + + os.kill(os.getpid(), signum) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + if self.debug: + print(f"Initialized Rerun visualization with recording ID: {self.recording_id}") + if np.linalg.norm(self._base_translation) > 0: + print(f"Applying translation offset: {self._base_translation}") + if PSUTIL_AVAILABLE: + print("Enhanced process monitoring enabled") + + def _start_parent_process_monitoring(self): + """Start monitoring the parent process and cleanup when it dies.""" + if not PSUTIL_AVAILABLE: + if self.debug: + print("psutil not available, skipping parent process monitoring") + return + + self._monitor_active = True + + def monitor_parent_process(): + """Monitor thread function that watches the parent process.""" + if self.debug: + print(f"Starting parent process monitor for PID {self._parent_pid}") + + # Get parent process handle + parent_process = psutil.Process(self._parent_pid) + + # Monitor parent process + while self._monitor_active: + try: + if not parent_process.is_running(): + if self.debug: + print(f"Parent process {self._parent_pid} died, cleaning up Rerun...") + self._kill_rerun_processes() + break + + # Check every 2 seconds + time.sleep(2) + + except (psutil.NoSuchProcess, psutil.AccessDenied): + if self.debug: + print(f"Parent process {self._parent_pid} no longer accessible, cleaning up...") + self._kill_rerun_processes() + break + except Exception as e: + if self.debug: + print(f"Error in parent process monitor: {e}") + break + + # Start monitor thread + self._monitor_thread = threading.Thread(target=monitor_parent_process, daemon=True) + self._monitor_thread.start() + + def _kill_rerun_processes(self): + """Enhanced method to kill Rerun viewer processes using psutil.""" + try: + if PSUTIL_AVAILABLE: + killed_count = 0 + for proc in psutil.process_iter(["pid", "name", "cmdline"]): + try: + # Check if it's a rerun process + is_rerun = False + + # Check process name + if proc.info["name"] and "rerun" in proc.info["name"].lower(): + is_rerun = True + + # Check command line arguments + if proc.info["cmdline"] and any("rerun" in str(arg).lower() for arg in proc.info["cmdline"]): + is_rerun = True + + if is_rerun: + proc.kill() + killed_count += 1 + if self.debug: + print(f"Killed Rerun process {proc.info['pid']} ({proc.info['name']})") + + except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): + # Process already dead 
or inaccessible + pass + except Exception as e: + if self.debug: + print(f"Error killing process: {e}") + + if self.debug: + print(f"Killed {killed_count} Rerun processes using psutil") + + else: + # Fallback to pkill if psutil not available + result = subprocess.run(["pkill", "-f", "rerun"], stderr=subprocess.DEVNULL, check=False) + if self.debug: + print(f"Used pkill fallback (return code: {result.returncode})") + + except Exception as e: + if self.debug: + print(f"Error killing rerun processes: {e}") + + @staticmethod + def _cleanup_class_resources(recording_id: str, save_path: str | None, debug: bool) -> None: + """Static method for cleanup that doesn't hold references to the instance. + + This is called by weakref.finalize when the object is garbage collected. + """ + if debug: + print(f"Cleaning up Rerun visualization for {recording_id}") + + # Disconnect from Rerun + rr.disconnect() + + # Save to file if requested + if save_path is not None: + rr.save(save_path) + if debug: + print(f"Saved Rerun recording to {save_path}") + + # Enhanced process killing + if PSUTIL_AVAILABLE: + killed_count = 0 + for proc in psutil.process_iter(["pid", "name", "cmdline"]): + if (proc.info["name"] and "rerun" in proc.info["name"].lower()) or ( + proc.info["cmdline"] and any("rerun" in str(arg).lower() for arg in proc.info["cmdline"]) + ): + proc.kill() + killed_count += 1 + + if debug: + print(f"Killed {killed_count} Rerun processes during cleanup") + else: + subprocess.run(["pkill", "-f", "rerun"], stderr=subprocess.DEVNULL, check=False) + + if debug: + print("Cleanup completed") + + def _cleanup_on_exit(self) -> None: + """Manual cleanup method for signal handlers.""" + if not self._closed: + # Stop monitoring thread + self._monitor_active = False + + self.close() + self._kill_rerun_processes() + + def close(self) -> None: + """Close the Rerun visualization with enhanced cleanup.""" + if self._closed: + return + + # Stop parent process monitoring + self._monitor_active = False + if self._monitor_thread and self._monitor_thread.is_alive(): + # Give the thread a moment to stop gracefully + time.sleep(0.1) + + # Disconnect from Rerun (closes connections, servers, and files) + rr.disconnect() + + # Save to file if requested + if self.save_path is not None: + rr.save(self.save_path) + if self.debug: + print(f"Saved Rerun recording to {self.save_path}") + + self._closed = True + + # Terminate viewer process if we have a handle + try: + process = getattr(self, "_rerun_process", None) + if process is not None and process.poll() is None: + process.terminate() + try: + process.wait(timeout=5) + except Exception: + process.kill() + except Exception: + pass + + # Enhanced process killing + self._kill_rerun_processes() + + # Remove from global registry + global _GLOBAL_PLAN_VISUALIZERS + if self in _GLOBAL_PLAN_VISUALIZERS: + _GLOBAL_PLAN_VISUALIZERS.remove(self) + + if self.debug: + print("Closed Rerun visualization with enhanced cleanup") + + def visualize_plan( + self, + plan: JointState, + target_pose: torch.Tensor, + robot_spheres: list[Any] | None = None, + attached_spheres: list[Any] | None = None, + ee_positions: np.ndarray | None = None, + finger_positions: np.ndarray | None = None, + frame_duration: float = 0.1, + world_scene: Optional["trimesh.Scene"] = None, + ) -> None: + """Visualize a complete motion plan with all components. 
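+
+        The call first clears previously logged dynamic entities, logs the world
+        scene (once per geometry) if one is supplied, then draws the target pose,
+        the end-effector trajectory, and any robot/attached collision spheres.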
+
+        Args:
+            plan: Joint state trajectory to visualize
+            target_pose: Target end-effector pose
+            robot_spheres: Optional list of robot collision spheres
+            attached_spheres: Optional list of attached object spheres
+            ee_positions: Optional end-effector positions
+            finger_positions: Optional finger positions
+            frame_duration: Duration between frames in seconds
+            world_scene: Optional world scene to visualize
+        """
+        if self.debug:
+            robot_count = len(robot_spheres) if robot_spheres else 0
+            attached_count = len(attached_spheres) if attached_spheres else 0
+            offset_info = (
+                f"offset={self._base_translation}" if np.linalg.norm(self._base_translation) > 0 else "no offset"
+            )
+            print(
+                f"Visualizing plan: {len(plan.position)} waypoints, {robot_count} robot spheres (with offset),"
+                f" {attached_count} attached spheres (no offset), {offset_info}"
+            )
+
+        # Set timeline for static visualization (separate from animation)
+        rr.set_time_sequence("static_plan", self._current_frame)
+        self._current_frame += 1
+
+        # Clear previous visualization of dynamic entities (keep static meshes)
+        self._clear_visualization()
+
+        # If a scene is supplied and not yet logged, draw it once
+        if world_scene is not None:
+            self._visualize_world_scene(world_scene)
+
+        # Visualize target pose
+        self._visualize_target_pose(target_pose)
+
+        # Visualize trajectory (end-effector positions if provided)
+        self._visualize_trajectory(plan, frame_duration, ee_positions, finger_positions)
+
+        # Visualize spheres if provided
+        if robot_spheres:
+            self._visualize_robot_spheres(robot_spheres)
+        if attached_spheres:
+            self._visualize_attached_spheres(attached_spheres)
+        else:
+            # Clear any existing attached sphere visualization when no objects are attached
+            self._clear_attached_spheres()
+
+    def _clear_visualization(self) -> None:
+        """Clear all visualization entities."""
+        # Clear dynamic trajectory, target, and finger logs to avoid artifacts between visualizations
+        dynamic_paths = [
+            "trajectory",  # keyframe children cleared recursively
+            "finger_trajectory",
+            "finger",  # keyframe children cleared recursively
+            "target",
+        ]
+
+        for path in dynamic_paths:
+            rr.log(f"world/{path}", rr.Clear(recursive=True))
+
+        for entity_type, entities in self._sphere_entities.items():
+            for entity in entities:
+                rr.log(f"world/{entity_type}/{entity}", rr.Clear(recursive=True))
+            self._sphere_entities[entity_type] = []
+        self._current_frame = 0
+
+    def _visualize_target_pose(self, target_pose: torch.Tensor) -> None:
+        """Visualize the target end-effector pose.
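+
+        Logs a red point marker at the target position and a coordinate frame for
+        the target orientation; the configured base translation offset is applied
+        to the position first.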
+
+        Args:
+            target_pose: Target pose as 4x4 transformation matrix
+        """
+        pos, rot = PoseUtils.unmake_pose(target_pose)
+
+        # Convert to numpy arrays
+        pos_np = pos.detach().cpu().numpy() if torch.is_tensor(pos) else np.array(pos)
+        rot_np = rot.detach().cpu().numpy() if torch.is_tensor(rot) else np.array(rot)
+
+        # Ensure arrays are the right shape
+        pos_np = pos_np.reshape(-1)
+        rot_np = rot_np.reshape(3, 3)
+
+        # Apply translation offset
+        pos_np += self._base_translation
+
+        # Log target position
+        rr.log(
+            "world/target/position",
+            rr.Points3D(
+                positions=np.array([pos_np]),
+                colors=[[255, 0, 0]],  # Red
+                radii=[0.02],
+            ),
+        )
+
+        # Log target orientation as a coordinate frame using the full rotation
+        # matrix (an axis-angle derived from a single matrix entry is ill-defined)
+        rr.log(
+            "world/target/frame",
+            rr.Transform3D(
+                translation=pos_np,
+                mat3x3=rot_np,
+            ),
+        )
+
+    def _visualize_trajectory(
+        self,
+        plan: JointState,
+        frame_duration: float,
+        ee_positions: np.ndarray | None = None,
+        finger_positions: np.ndarray | None = None,
+    ) -> None:
+        """Visualize the robot trajectory.
+
+        Args:
+            plan: Joint state trajectory
+            frame_duration: Duration between frames in seconds
+            ee_positions: Optional end-effector positions
+            finger_positions: Optional finger positions
+        """
+        if ee_positions is None:
+            raw = plan.position.detach().cpu().numpy() if torch.is_tensor(plan.position) else np.array(plan.position)
+            # Fallback: treat the first three joint components as xyz positions
+            if raw.shape[1] >= 3:
+                positions = raw[:, :3]
+            else:
+                raise ValueError(
+                    "ee_positions not provided and joint positions have fewer than 3 components per waypoint"
+                )
+        else:
+            positions = ee_positions
+
+        # Apply translation offset
+        positions = positions + self._base_translation
+
+        # Log trajectory points
+        rr.log(
+            "world/trajectory",
+            rr.LineStrips3D(
+                [positions],  # single strip consisting of all waypoints
+                colors=[[0, 100, 255]],  # Blue
+                radii=[0.005],
+            ),
+        )
+
+        # Log keyframes
+        for i, pos in enumerate(positions):
+            rr.log(
+                f"world/trajectory/keyframe_{i}",
+                rr.Points3D(
+                    positions=np.array([pos]),
+                    colors=[[0, 100, 255]],  # Blue
+                    radii=[0.01],
+                ),
+            )
+
+        # Log finger path if provided
+        if finger_positions is not None:
+            if len(finger_positions) == 0:
+                return
+            try:
+                # Apply the same base translation offset as the end-effector path
+                finger_world = np.asarray(finger_positions) + self._base_translation
+                rr.log(
+                    "world/finger_trajectory",
+                    rr.LineStrips3D(
+                        [finger_world],
+                        colors=[[0, 0, 255]],  # Blue
+                        radii=[0.005],
+                    ),
+                )
+                for i, pos in enumerate(finger_world):
+                    rr.log(
+                        f"world/finger/keyframe_{i}",
+                        rr.Points3D(positions=np.array([pos]), colors=[[0, 0, 255]], radii=[0.01]),
+                    )
+            except Exception as e:
+                print(f"Error visualizing finger trajectory: {e}")
+
+    def _visualize_robot_spheres(self, spheres: list[Any]) -> None:
+        """Visualize robot collision spheres.
+
+        Args:
+            spheres: List of robot collision spheres
+        """
+        self._log_spheres(
+            spheres=spheres,
+            entity_type="robot",
+            color=[0, 255, 100, 128],  # Semi-transparent green
+            apply_offset=True,
+        )
+
+    def _visualize_attached_spheres(self, spheres: list[Any]) -> None:
+        """Visualize attached object collision spheres.
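+
+        Attached-object sphere positions are already expressed in world
+        coordinates, so no base translation offset is applied
+        (``apply_offset=False`` below).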
+ + Args: + spheres: List of attached object spheres + """ + self._log_spheres( + spheres=spheres, + entity_type="attached", + color=[255, 0, 0, 128], # Semi-transparent red + apply_offset=False, + ) + + def _clear_attached_spheres(self) -> None: + """Clear all attached object spheres.""" + for entity_id in self._sphere_entities.get("attached", []): + rr.log(f"world/attached/{entity_id}", rr.Clear(recursive=True)) + self._sphere_entities["attached"] = [] + + # --------------------------------------------------------------------- + # PRIVATE UTILITIES + # --------------------------------------------------------------------- + + def _log_spheres( + self, + spheres: list[Any], + entity_type: str, + color: list[int], + apply_offset: bool = False, + ) -> None: + """Generic helper for sphere visualization. + + Args: + spheres: List of CuRobo ``Sphere`` objects. + entity_type: Log path prefix (``robot`` or ``attached``). + color: RGBA color for the spheres. + apply_offset: Whether to add ``self._base_translation`` to sphere positions. + """ + for i, sphere in enumerate(spheres): + entity_id = f"sphere_{i}" + # Track entities so we can clear them later + self._sphere_entities.setdefault(entity_type, []).append(entity_id) + + # Convert position to numpy and optionally apply offset + pos = ( + sphere.position.detach().cpu().numpy() + if torch.is_tensor(sphere.position) + else np.array(sphere.position) + ) + if apply_offset: + pos = pos + self._base_translation + pos = pos.reshape(-1) # Ensure 1-D + + # Log sphere via Rerun + rr.log( + f"world/{entity_type}/{entity_id}", + rr.Points3D(positions=np.array([pos]), colors=[color], radii=[float(sphere.radius)]), + ) + + def _visualize_world_scene(self, scene: "trimesh.Scene") -> None: + """Log world geometry and dynamic transforms each call. + + Geometry is sent once (cached), but transforms are updated every invocation + so objects that move (cubes after randomization) appear at the correct + pose per episode/frame. 
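+
+        A minimal usage sketch (hypothetical; the ``trimesh`` scene is assumed to
+        be assembled by the caller from the planner's collision world):
+
+            import trimesh
+            scene = trimesh.Scene()
+            scene.add_geometry(trimesh.creation.box(extents=(0.05, 0.05, 0.05)), node_name="cube_1")
+            visualizer._visualize_world_scene(scene)  # mesh logged once, transform updated per call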
+ """ + import trimesh # local import to avoid hard dependency at top + + def _to_rerun_mesh(mesh: "trimesh.Trimesh") -> "rr.Mesh3D": + # Basic conversion without materials + return rr.Mesh3D( + vertex_positions=mesh.vertices, + triangle_indices=mesh.faces, + vertex_normals=mesh.vertex_normals if mesh.vertex_normals is not None else None, + ) + + if not hasattr(self, "_logged_geometry"): + self._logged_geometry = set() + + for node in scene.graph.nodes_geometry: + tform, geom_key = scene.graph.get(node) + mesh = scene.geometry.get(geom_key) + if mesh is None: + continue + rr_path = f"world/scene/{node.replace('/', '_')}" + + # Always update transform (objects may move between calls) + # NOTE: World scene objects are already in correct world coordinates, no offset needed + rr.log( + rr_path, + rr.Transform3D( + translation=tform[:3, 3], + mat3x3=tform[:3, :3], + axis_length=0.25, + ), + static=False, + ) + + # Geometry: send only once per node to avoid duplicates + if rr_path not in self._logged_geometry: + if isinstance(mesh, trimesh.Trimesh): + rr_mesh = _to_rerun_mesh(mesh) + elif isinstance(mesh, trimesh.PointCloud): + rr_mesh = rr.Points3D(positions=mesh.vertices, colors=mesh.colors) + else: + continue + + rr.log(rr_path, rr_mesh, static=True) + self._logged_geometry.add(rr_path) + + if self.debug: + print(f"Logged/updated {len(scene.graph.nodes_geometry)} world nodes in Rerun") + + def animate_plan( + self, + ee_positions: np.ndarray, + object_positions: dict[str, np.ndarray] | None = None, + timeline: str = "plan", + point_radius: float = 0.01, + ) -> None: + """Animate robot end-effector and (optionally) attached object positions over time using Rerun. + + This helper logs a single 3-D point per timestep so that Rerun can play back the + trajectory on the provided ``timeline``. It is intentionally lightweight and does + not attempt to render the full robot geometry—only key points—keeping the data + transfer to the viewer minimal. + + Args: + ee_positions: Array of shape (T, 3) with end-effector world positions. + object_positions: Mapping from object name to an array (T, 3) with that + object's world positions. Each trajectory must be at least as long as + ``ee_positions``; extra entries are ignored. + timeline: Name of the Rerun timeline used for the animation frames. + point_radius: Visual radius (in metres) of the rendered points. 
+ """ + if ee_positions is None or len(ee_positions) == 0: + return + + # Iterate over timesteps and log a frame on the chosen timeline + for idx, pos in enumerate(ee_positions): + # Set time on the chosen timeline (creates it if needed) + rr.set_time_sequence(timeline, idx) + + # Log end-effector marker (needs offset for multi-env) + rr.log( + "world/anim/ee", + rr.Points3D( + positions=np.array([pos + self._base_translation]), + colors=[[0, 100, 255]], # Blue + radii=[point_radius], + ), + ) + + # Optionally log attached object markers + # NOTE: Object positions are already in world coordinates, no offset needed + if object_positions is not None: + for name, traj in object_positions.items(): + if idx >= len(traj): + continue + rr.log( + f"world/anim/{name}", + rr.Points3D( + positions=np.array([traj[idx]]), + colors=[[255, 128, 0]], # Orange + radii=[point_radius], + ), + ) + + def animate_spheres_along_path( + self, + plan: JointState, + robot_spheres_at_start: list[Any] | None = None, + attached_spheres_at_start: list[Any] | None = None, + timeline: str = "sphere_animation", + frame_duration: float = 0.1, + interpolation_steps: int = 10, + ) -> None: + """Animate robot and attached object spheres along the planned trajectory with smooth interpolation. + + This method creates a dense, interpolated trajectory and computes sphere positions + at many intermediate points to create smooth animation of the robot moving along the path. + + Args: + plan: Joint state trajectory to animate spheres along + robot_spheres_at_start: Initial robot collision spheres (for reference) + attached_spheres_at_start: Initial attached object spheres (for reference) + timeline: Name of the Rerun timeline for the animation + frame_duration: Duration between frames in seconds + interpolation_steps: Number of interpolated steps between each waypoint pair + """ + if plan is None or len(plan.position) == 0: + if self.debug: + print("No plan available for sphere animation") + return + + if self.debug: + robot_count = len(robot_spheres_at_start) if robot_spheres_at_start else 0 + attached_count = len(attached_spheres_at_start) if attached_spheres_at_start else 0 + print(f"Creating smooth animation for {robot_count} robot spheres and {attached_count} attached spheres") + print( + f"Original plan: {len(plan.position)} waypoints, interpolating with {interpolation_steps} steps between" + " waypoints" + ) + + # We need access to the motion generator to compute spheres at each waypoint + if not hasattr(self, "_motion_gen_ref") or self._motion_gen_ref is None: + if self.debug: + print("Motion generator reference not available for sphere animation") + return + + motion_gen = self._motion_gen_ref + + # Validate motion generator has required attributes + if not hasattr(motion_gen, "kinematics") or motion_gen.kinematics is None: + if self.debug: + print("Motion generator kinematics not available for sphere animation") + return + + # Clear static spheres to avoid visual clutter during animation + self._hide_static_spheres_for_animation() + + # Count robot link spheres (excluding attached objects) for consistent splitting + robot_link_count = 0 + if robot_spheres_at_start: + robot_link_count = len(robot_spheres_at_start) + + # Create interpolated trajectory for smooth animation + interpolated_positions = self._create_interpolated_trajectory(plan, interpolation_steps) + + if self.debug: + print(f"Created interpolated trajectory with {len(interpolated_positions)} total frames") + + # Animate spheres along the interpolated trajectory 
+ for frame_idx, joint_positions in enumerate(interpolated_positions): + # Set time on the animation timeline with consistent timing + rr.set_time_sequence(timeline, frame_idx) + + # Create joint state for this interpolated position + if isinstance(joint_positions, torch.Tensor): + sphere_position = joint_positions + else: + sphere_position = torch.tensor(joint_positions) + + # Ensure tensor is on the right device for CuRobo + if hasattr(motion_gen, "tensor_args") and motion_gen.tensor_args is not None: + sphere_position = motion_gen.tensor_args.to_device(sphere_position) + + # Get spheres at this configuration + try: + sphere_list = motion_gen.kinematics.get_robot_as_spheres(sphere_position)[0] + except Exception as e: + if self.debug: + print(f"Failed to compute spheres for frame {frame_idx}: {e}") + continue + + # Handle sphere_list as either a list or single Sphere object + if hasattr(sphere_list, "__iter__") and not hasattr(sphere_list, "position"): + sphere_items = list(sphere_list) + else: + sphere_items = [sphere_list] + + # Separate robot and attached object spheres with different colors + robot_sphere_positions = [] + robot_sphere_radii = [] + attached_sphere_positions = [] + attached_sphere_radii = [] + + for i, sphere in enumerate(sphere_items): + # Convert position to numpy + pos = ( + sphere.position.detach().cpu().numpy() + if torch.is_tensor(sphere.position) + else np.array(sphere.position) + ) + pos = pos.reshape(-1) + radius = float(sphere.radius) + + if i < robot_link_count: + # Robot sphere - needs base translation offset + robot_sphere_positions.append(pos + self._base_translation) + robot_sphere_radii.append(radius) + else: + # Attached object sphere - already in world coordinates + attached_sphere_positions.append(pos) + attached_sphere_radii.append(radius) + + # Log robot spheres with green color + if robot_sphere_positions: + rr.log( + "world/robot_animation", + rr.Points3D( + positions=np.array(robot_sphere_positions), + colors=[[0, 255, 100, 220]] * len(robot_sphere_positions), # Bright green + radii=robot_sphere_radii, + ), + ) + + # Log attached object spheres with orange color (or clear if no attached objects) + if attached_sphere_positions: + rr.log( + "world/attached_animation", + rr.Points3D( + positions=np.array(attached_sphere_positions), + colors=[[255, 150, 0, 220]] * len(attached_sphere_positions), # Bright orange + radii=attached_sphere_radii, + ), + ) + else: + # Clear attached object spheres when no objects are attached + rr.log("world/attached_animation", rr.Clear(recursive=True)) + + if self.debug: + print( + f"Completed smooth sphere animation with {len(interpolated_positions)} frames on timeline '{timeline}'" + ) + + def _hide_static_spheres_for_animation(self) -> None: + """Hide static sphere visualization during animation to reduce visual clutter.""" + # Clear static robot spheres + for entity_id in self._sphere_entities.get("robot", []): + rr.log(f"world/robot/{entity_id}", rr.Clear(recursive=True)) + + # Clear static attached spheres + for entity_id in self._sphere_entities.get("attached", []): + rr.log(f"world/attached/{entity_id}", rr.Clear(recursive=True)) + + if self.debug: + print("Hidden static spheres for cleaner animation view") + + def _create_interpolated_trajectory(self, plan: JointState, interpolation_steps: int) -> list[torch.Tensor]: + """Create a smooth interpolated trajectory between waypoints. 
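+
+        Interpolation is linear in joint space: for consecutive waypoints ``q_i``
+        and ``q_{i+1}``, intermediate configurations are
+        ``q(alpha) = (1 - alpha) * q_i + alpha * q_{i+1}`` with
+        ``alpha = step / interpolation_steps``.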
+
+        Args:
+            plan: Original joint state trajectory
+            interpolation_steps: Number of interpolation steps between each waypoint pair
+
+        Returns:
+            List of interpolated joint positions
+        """
+        if len(plan.position) < 2:
+            # If only one waypoint, just return it
+            return [plan.position[0] if isinstance(plan.position[0], torch.Tensor) else torch.tensor(plan.position[0])]
+
+        interpolated_positions = []
+
+        # Convert plan positions to tensors if needed
+        waypoints = []
+        for i in range(len(plan.position)):
+            pos = plan.position[i]
+            if isinstance(pos, torch.Tensor):
+                waypoints.append(pos)
+            else:
+                waypoints.append(torch.tensor(pos))
+
+        # Interpolate between each pair of consecutive waypoints
+        for i in range(len(waypoints) - 1):
+            start_pos = waypoints[i]
+            end_pos = waypoints[i + 1]
+
+            # Create interpolation steps between start and end
+            for step in range(interpolation_steps):
+                alpha = step / interpolation_steps
+                interpolated_pos = start_pos * (1 - alpha) + end_pos * alpha
+                interpolated_positions.append(interpolated_pos)
+
+        # Add the final waypoint
+        interpolated_positions.append(waypoints[-1])
+
+        return interpolated_positions
+
+    def set_motion_generator_reference(self, motion_gen: Any) -> None:
+        """Set the motion generator reference for sphere animation.
+
+        Args:
+            motion_gen: CuRobo motion generator instance
+        """
+        self._motion_gen_ref = motion_gen
+
+    def restore_static_spheres(self) -> None:
+        """Restore static sphere visualization after animation."""
+        # Note: This would require re-calling visualize_plan to recreate the static spheres
+        if self.debug:
+            print("To restore static spheres, call visualize_plan() again")
+
+    def clear_animation_spheres(self) -> None:
+        """Clear animated sphere visualization."""
+        rr.log("world/robot_animation", rr.Clear(recursive=True))
+        rr.log("world/attached_animation", rr.Clear(recursive=True))
+
+        if self.debug:
+            print("Cleared animation spheres")
+
+    def clear_attached_animation_spheres(self) -> None:
+        """Clear only attached object animation spheres."""
+        rr.log("world/attached_animation", rr.Clear(recursive=True))
+
+        if self.debug:
+            print("Cleared attached object animation spheres")
diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_cube_stack.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_cube_stack.py
new file mode 100644
index 00000000000..44f71fc99e1
--- /dev/null
+++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_cube_stack.py
@@ -0,0 +1,243 @@
+# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+# +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import random +from typing import Any + +import pytest + +SEED: int = 42 +random.seed(SEED) + +from isaaclab.app import AppLauncher + +headless = True +app_launcher = AppLauncher(headless=headless) +simulation_app: Any = app_launcher.app + +import gymnasium as gym +import torch + +import isaaclab.utils.math as math_utils +from isaaclab.assets import Articulation, RigidObject +from isaaclab.envs.manager_based_env import ManagerBasedEnv +from isaaclab.markers import FRAME_MARKER_CFG, VisualizationMarkers + +from isaaclab_mimic.envs.franka_stack_ik_rel_mimic_env_cfg import FrankaCubeStackIKRelMimicEnvCfg +from isaaclab_mimic.motion_planners.curobo.curobo_planner import CuroboPlanner +from isaaclab_mimic.motion_planners.curobo.curobo_planner_config import CuroboPlannerConfig + +GRIPPER_OPEN_CMD: float = 1.0 +GRIPPER_CLOSE_CMD: float = -1.0 + + +def _eef_name(env: ManagerBasedEnv) -> str: + return list(env.cfg.subtask_configs.keys())[0] + + +def _action_from_pose( + env: ManagerBasedEnv, target_pose: torch.Tensor, gripper_binary_action: float, env_id: int = 0 +) -> torch.Tensor: + eef = _eef_name(env) + play_action = env.target_eef_pose_to_action( + target_eef_pose_dict={eef: target_pose}, + gripper_action_dict={eef: torch.tensor([gripper_binary_action], device=env.device, dtype=torch.float32)}, + env_id=env_id, + ) + if play_action.dim() == 1: + play_action = play_action.unsqueeze(0) + return play_action + + +def _env_step_with_action(env: ManagerBasedEnv, action: torch.Tensor) -> None: + env.step(action) + + +def _execute_plan(env: ManagerBasedEnv, planner: CuroboPlanner, gripper_binary_action: float, env_id: int = 0) -> None: + """Execute planner's EEF planned poses using env.step with IK-relative controller actions.""" + planned_poses = planner.get_planned_poses() + if not planned_poses: + return + for pose in planned_poses: + action = _action_from_pose(env, pose, gripper_binary_action, env_id=env_id) + _env_step_with_action(env, action) + + +def _execute_gripper_action( + env: ManagerBasedEnv, robot: Articulation, gripper_binary_action: float, steps: int = 12, env_id: int = 0 +) -> None: + """Hold current EEF pose and toggle gripper for a few steps.""" + eef = _eef_name(env) + curr_pose = env.get_robot_eef_pose(eef_name=eef, env_ids=[env_id])[0] + for _ in range(steps): + action = _action_from_pose(env, curr_pose, gripper_binary_action, env_id=env_id) + _env_step_with_action(env, action) + + +DOWN_FACING_QUAT = torch.tensor([0.0, 1.0, 0.0, 0.0], dtype=torch.float32) + + +@pytest.fixture(scope="class") +def cube_stack_test_env(): + """Create the environment and motion planner once for the test suite and yield them.""" + random.seed(SEED) + torch.manual_seed(SEED) + + env_cfg = FrankaCubeStackIKRelMimicEnvCfg() + env_cfg.scene.num_envs = 1 + for frame in env_cfg.scene.ee_frame.target_frames: + if frame.name == "end_effector": + print(f"Setting end effector offset from {frame.offset.pos} to (0.0, 0.0, 0.0) for SkillGen parity") + frame.offset.pos = (0.0, 0.0, 0.0) + + env: ManagerBasedEnv = gym.make( + "Isaac-Stack-Cube-Franka-IK-Rel-Mimic-v0", + cfg=env_cfg, + headless=headless, + ).unwrapped + env.reset() + + robot: Articulation = env.scene["robot"] + planner_cfg = CuroboPlannerConfig.franka_stack_cube_config() + planner_cfg.visualize_plan = False + planner_cfg.visualize_spheres = False + planner_cfg.debug_planner = True + planner_cfg.retreat_distance = 0.05 + planner_cfg.approach_distance = 0.05 + 
planner_cfg.time_dilation_factor = 1.0 + + planner = CuroboPlanner( + env=env, + robot=robot, + config=planner_cfg, + env_id=0, + ) + + goal_pose_visualizer = None + if not headless: + marker_cfg = FRAME_MARKER_CFG.replace(prim_path="/World/Visuals/goal_pose") + marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1) + goal_pose_visualizer = VisualizationMarkers(marker_cfg) + + yield { + "env": env, + "robot": robot, + "planner": planner, + "goal_pose_visualizer": goal_pose_visualizer, + } + + env.close() + simulation_app.close() + + +class TestCubeStackPlanner: + @pytest.fixture(autouse=True) + def setup(self, cube_stack_test_env): + self.env: ManagerBasedEnv = cube_stack_test_env["env"] + self.robot: Articulation = cube_stack_test_env["robot"] + self.planner: CuroboPlanner = cube_stack_test_env["planner"] + self.goal_pose_visualizer: VisualizationMarkers | None = cube_stack_test_env["goal_pose_visualizer"] + + def _visualize_goal_pose(self, pos: torch.Tensor, quat: torch.Tensor) -> None: + """Visualize the goal frame markers at pos, quat (xyzw).""" + if headless or self.goal_pose_visualizer is None: + return + self.goal_pose_visualizer.visualize(translations=pos.unsqueeze(0), orientations=quat.unsqueeze(0)) + + def _pose_from_xy_quat(self, xy: torch.Tensor, z: float, quat: torch.Tensor) -> torch.Tensor: + """Build a 4×4 pose given xy (Tensor[2]), z, and quaternion.""" + device = xy.device + dtype = xy.dtype + pos = torch.cat([xy, torch.tensor([z], dtype=dtype, device=device)]) + rot = math_utils.matrix_from_quat(quat.to(device).unsqueeze(0))[0] + return math_utils.make_pose(pos, rot) + + def _get_cube_pos(self, cube_name: str) -> torch.Tensor: + """Return the current world position of a cube's root (x, y, z).""" + obj: RigidObject = self.env.scene[cube_name] + return obj.data.root_pos_w[0, :3].clone().detach() + + def _place_pose_over_cube(self, cube_name: str, height_offset: float) -> torch.Tensor: + """Compute a goal pose directly above the named cube using the latest pose.""" + base_pos = self._get_cube_pos(cube_name) + return self._pose_from_xy_quat(base_pos[:2], base_pos[2].item() + height_offset, DOWN_FACING_QUAT) + + def test_pick_and_stack(self) -> None: + """Plan and execute pick-and-place to stack cube_1 on cube_2, then cube_3 on the stack.""" + cube_1_pos = self._get_cube_pos("cube_1") + cube_2_pos = self._get_cube_pos("cube_2") + cube_3_pos = self._get_cube_pos("cube_3") + print(f"Cube 1 position: {cube_1_pos}") + print(f"Cube 2 position: {cube_2_pos}") + print(f"Cube 3 position: {cube_3_pos}") + + # Approach above cube_1 + pre_grasp_height = 0.1 + pre_grasp_pose = self._pose_from_xy_quat(cube_1_pos[:2], pre_grasp_height, DOWN_FACING_QUAT) + print(f"Pre-grasp pose: {pre_grasp_pose}") + if not headless: + pos_pg = pre_grasp_pose[:3, 3].detach().cpu() + quat_pg = math_utils.quat_from_matrix(pre_grasp_pose[:3, :3].unsqueeze(0))[0].detach().cpu() + self._visualize_goal_pose(pos_pg, quat_pg) + + # Plan to pre-grasp + assert self.planner.update_world_and_plan_motion(pre_grasp_pose), "Failed to plan to pre-grasp pose" + _execute_plan(self.env, self.planner, gripper_binary_action=GRIPPER_OPEN_CMD) + + # Close gripper to grasp cube_1 (hold pose while closing) + _execute_gripper_action(self.env, self.robot, GRIPPER_CLOSE_CMD, steps=16) + + # Plan placement with cube_1 attached (above latest cube_2) + place_pose = self._place_pose_over_cube("cube_2", 0.15) + + if not headless: + pos_place = place_pose[:3, 3].detach().cpu() + quat_place = math_utils.quat_from_matrix(place_pose[:3, 
:3].unsqueeze(0))[0].detach().cpu() + self._visualize_goal_pose(pos_place, quat_place) + + # Plan with attached object + assert self.planner.update_world_and_plan_motion( + place_pose, expected_attached_object="cube_1" + ), "Failed to plan placement trajectory with attached cube" + _execute_plan(self.env, self.planner, gripper_binary_action=GRIPPER_CLOSE_CMD) + + # Release cube 1 + _execute_gripper_action(self.env, self.robot, GRIPPER_OPEN_CMD, steps=16) + + # Go to cube 3 + cube_3_pos_now = self._get_cube_pos("cube_3") + pre_grasp_pose = self._pose_from_xy_quat(cube_3_pos_now[:2], pre_grasp_height, DOWN_FACING_QUAT) + print(f"Pre-grasp pose: {pre_grasp_pose}") + if not headless: + pos_pg = pre_grasp_pose[:3, 3].detach().cpu() + quat_pg = math_utils.quat_from_matrix(pre_grasp_pose[:3, :3].unsqueeze(0))[0].detach().cpu() + self._visualize_goal_pose(pos_pg, quat_pg) + + assert self.planner.update_world_and_plan_motion( + pre_grasp_pose, expected_attached_object=None + ), "Failed to plan retract motion" + _execute_plan(self.env, self.planner, gripper_binary_action=GRIPPER_OPEN_CMD) + + # Grasp cube 3 + _execute_gripper_action(self.env, self.robot, GRIPPER_CLOSE_CMD) + + # Plan placement with cube_3 attached, to cube 2 (use latest cube_2 pose) + place_pose = self._place_pose_over_cube("cube_2", 0.18) + + if not headless: + pos_place = place_pose[:3, 3].detach().cpu() + quat_place = math_utils.quat_from_matrix(place_pose[:3, :3].unsqueeze(0))[0].detach().cpu() + self._visualize_goal_pose(pos_place, quat_place) + + assert self.planner.update_world_and_plan_motion( + place_pose, expected_attached_object="cube_3" + ), "Failed to plan placement trajectory with attached cube" + _execute_plan(self.env, self.planner, gripper_binary_action=GRIPPER_CLOSE_CMD) + + # Release cube 3 + _execute_gripper_action(self.env, self.robot, GRIPPER_OPEN_CMD) + + print("Pick-and-place stacking test completed successfully!") diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_franka.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_franka.py new file mode 100644 index 00000000000..465961456a2 --- /dev/null +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_franka.py @@ -0,0 +1,173 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 + +import random +from typing import Any + +import pytest + +SEED: int = 42 +random.seed(SEED) + +from isaaclab.app import AppLauncher + +headless = True +app_launcher = AppLauncher(headless=headless) +simulation_app: Any = app_launcher.app + +import gymnasium as gym +import torch + +import isaaclab.utils.assets as _al_assets +import isaaclab.utils.math as math_utils +from isaaclab.assets import Articulation, RigidObjectCfg +from isaaclab.envs.manager_based_env import ManagerBasedEnv +from isaaclab.markers import FRAME_MARKER_CFG, VisualizationMarkers +from isaaclab.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg +from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg + +ISAAC_NUCLEUS_DIR: str = getattr(_al_assets, "ISAAC_NUCLEUS_DIR", "/Isaac") + +from isaaclab_mimic.motion_planners.curobo.curobo_planner import CuroboPlanner +from isaaclab_mimic.motion_planners.curobo.curobo_planner_config import CuroboPlannerConfig + +from isaaclab_tasks.manager_based.manipulation.stack.config.franka.stack_joint_pos_env_cfg import FrankaCubeStackEnvCfg + +# Predefined EE goals for the test +# Each entry is a tuple of: (goal specification, goal ID) +predefined_ee_goals_and_ids = [ + ({"pos": [0.70, -0.25, 0.25], "quat": [0.0, 0.707, 0.0, 0.707]}, "Behind wall, left"), + ({"pos": [0.70, 0.25, 0.25], "quat": [0.0, 0.707, 0.0, 0.707]}, "Behind wall, right"), + ({"pos": [0.65, 0.0, 0.45], "quat": [0.0, 1.0, 0.0, 0.0]}, "Behind wall, center, high"), + ({"pos": [0.80, -0.15, 0.35], "quat": [0.0, 0.5, 0.0, 0.866]}, "Behind wall, far left"), + ({"pos": [0.80, 0.15, 0.35], "quat": [0.0, 0.5, 0.0, 0.866]}, "Behind wall, far right"), +] + + +@pytest.fixture(scope="class") +def curobo_test_env(): + """Set up the environment for the Curobo test and yield test-critical data.""" + random.seed(SEED) + torch.manual_seed(SEED) + + env_cfg = FrankaCubeStackEnvCfg() + env_cfg.scene.num_envs = 1 + + # Add a static wall for the robot to avoid + wall_props = RigidBodyPropertiesCfg(kinematic_enabled=True, disable_gravity=True) + wall_cfg = RigidObjectCfg( + prim_path="/World/envs/env_0/moving_wall", + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/red_block.usd", + scale=(0.5, 4.5, 7.0), + rigid_props=wall_props, + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.55, 0.0, 0.80)), + ) + setattr(env_cfg.scene, "moving_wall", wall_cfg) + + env: ManagerBasedEnv = gym.make("Isaac-Stack-Cube-Franka-v0", cfg=env_cfg, headless=headless).unwrapped + env.reset() + + robot = env.scene["robot"] + planner = CuroboPlanner(env=env, robot=robot, config=CuroboPlannerConfig.franka_config()) + + goal_pose_visualizer = None + if not headless: + goal_marker_cfg = FRAME_MARKER_CFG.replace(prim_path="/World/Visuals/goal_poses") + goal_marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1) + goal_pose_visualizer = VisualizationMarkers(goal_marker_cfg) + + # Allow the simulation to settle + for _ in range(3): + env.sim.step(render=False) + + planner.update_world() + + # Default joint positions for the Franka arm (7-DOF) + home_j = torch.tensor([0.0, -0.4, 0.0, -2.1, 0.0, 2.1, 0.7]) + + # Yield the necessary objects for the test + yield { + "env": env, + "robot": robot, + "planner": planner, + "goal_pose_visualizer": goal_pose_visualizer, + "home_j": home_j, + } + + # Teardown: close the environment and simulation app + env.close() + simulation_app.close() + + +class TestCuroboPlanner: + """Test suite for the Curobo motion planner, focusing on obstacle avoidance.""" + + 
@pytest.fixture(autouse=True) + def setup(self, curobo_test_env): + """Inject the test environment into the test class instance.""" + self.env: ManagerBasedEnv = curobo_test_env["env"] + self.robot: Articulation = curobo_test_env["robot"] + self.planner: CuroboPlanner = curobo_test_env["planner"] + self.goal_pose_visualizer: VisualizationMarkers | None = curobo_test_env["goal_pose_visualizer"] + self.home_j: torch.Tensor = curobo_test_env["home_j"] + + def _visualize_goal_pose(self, pos: torch.Tensor, quat: torch.Tensor): + """Visualize the goal pose using frame markers if not in headless mode.""" + if headless or self.goal_pose_visualizer is None: + return + pos_vis = pos.unsqueeze(0) + quat_vis = quat.unsqueeze(0) + self.goal_pose_visualizer.visualize(translations=pos_vis, orientations=quat_vis) + + def _execute_current_plan(self): + """Replay the waypoints of the current plan in the simulator for visualization.""" + if headless or self.planner.current_plan is None: + return + for q in self.planner.current_plan.position: + q_tensor = q if isinstance(q, torch.Tensor) else torch.as_tensor(q, dtype=torch.float32) + self._set_arm_positions(q_tensor) + self.env.sim.step(render=True) + + def _set_arm_positions(self, q: torch.Tensor): + """Set the joint positions of the robot's arm, appending default gripper values if necessary.""" + if q.dim() == 1: + q = q.unsqueeze(0) + if q.shape[-1] == 7: # Arm only + fingers = torch.tensor([0.04, 0.04], device=q.device, dtype=q.dtype).repeat(q.shape[0], 1) + q_full = torch.cat([q, fingers], dim=-1) + else: + q_full = q + self.robot.write_joint_position_to_sim(q_full) + + @pytest.mark.parametrize("goal_spec, goal_id", predefined_ee_goals_and_ids) + def test_plan_to_predefined_goal(self, goal_spec, goal_id): + """Test planning to a predefined goal, ensuring the planner can find a path around an obstacle.""" + print(f"Planning for goal: {goal_id}") + + # Reset robot to a known home position before each test + self._set_arm_positions(self.home_j) + self.env.sim.step() + + pos = torch.tensor(goal_spec["pos"], dtype=torch.float32) + quat = torch.tensor(goal_spec["quat"], dtype=torch.float32) + + if not headless: + self._visualize_goal_pose(pos, quat) + + # Ensure the goal is actually behind the wall + assert pos[0] > 0.55, f"Goal '{goal_id}' is not behind the wall (x={pos[0]:.3f})" + + rot_matrix = math_utils.matrix_from_quat(quat.unsqueeze(0))[0] + ee_goal = math_utils.make_pose(pos, rot_matrix) + + result = self.planner.plan_motion(ee_goal) + print(f"Planning result for '{goal_id}': {'Success' if result else 'Failure'}") + + assert result, f"Failed to find a motion plan for the goal: '{goal_id}'" + + if result and not headless: + self._execute_current_plan() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/__init__.py index 5f2480fd5b0..cd979d4018b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/__init__.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/__init__.py @@ -7,6 +7,7 @@ from . 
import ( agents, + bin_stack_ik_rel_env_cfg, stack_ik_abs_env_cfg, stack_ik_rel_blueprint_env_cfg, stack_ik_rel_env_cfg, @@ -105,3 +106,23 @@ }, disable_env_checker=True, ) + +gym.register( + id="Isaac-Stack-Cube-Franka-IK-Rel-Skillgen-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + kwargs={ + "env_cfg_entry_point": stack_ik_rel_env_cfg.FrankaCubeStackEnvCfg, + "robomimic_bc_cfg_entry_point": os.path.join(agents.__path__[0], "robomimic/bc_rnn_low_dim.json"), + }, + disable_env_checker=True, +) + +gym.register( + id="Isaac-Stack-Cube-Bin-Franka-IK-Rel-Mimic-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + kwargs={ + "env_cfg_entry_point": bin_stack_ik_rel_env_cfg.FrankaBinStackEnvCfg, + "robomimic_bc_cfg_entry_point": os.path.join(agents.__path__[0], "robomimic/bc_rnn_low_dim.json"), + }, + disable_env_checker=True, +) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_ik_rel_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_ik_rel_env_cfg.py new file mode 100644 index 00000000000..fd4b386249e --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_ik_rel_env_cfg.py @@ -0,0 +1,35 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg +from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg +from isaaclab.utils import configclass + +from . import bin_stack_joint_pos_env_cfg + +## +# Pre-defined configs +## +from isaaclab_assets.robots.franka import FRANKA_PANDA_HIGH_PD_CFG # isort: skip + + +@configclass +class FrankaBinStackEnvCfg(bin_stack_joint_pos_env_cfg.FrankaBinStackEnvCfg): + def __post_init__(self): + # post init of parent + super().__post_init__() + + # Set Franka as robot + self.scene.robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # Set actions for the specific robot type (franka) + self.actions.arm_action = DifferentialInverseKinematicsActionCfg( + asset_name="robot", + joint_names=["panda_joint.*"], + body_name="panda_hand", + controller=DifferentialIKControllerCfg(command_type="pose", use_relative_mode=True, ik_method="dls"), + scale=0.5, + body_offset=DifferentialInverseKinematicsActionCfg.OffsetCfg(pos=[0.0, 0.0, 0.0]), + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py new file mode 100644 index 00000000000..fbc6454bba8 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py @@ -0,0 +1,203 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + + +import isaaclab.sim as sim_utils +from isaaclab.assets import RigidObjectCfg +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.sensors import FrameTransformerCfg +from isaaclab.sensors.frame_transformer.frame_transformer_cfg import OffsetCfg +from isaaclab.sim.schemas.schemas_cfg import RigidBodyPropertiesCfg +from isaaclab.sim.spawners.from_files.from_files_cfg import UsdFileCfg +from isaaclab.utils import configclass +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR + +from isaaclab_tasks.manager_based.manipulation.stack import mdp +from isaaclab_tasks.manager_based.manipulation.stack.mdp import franka_stack_events +from isaaclab_tasks.manager_based.manipulation.stack.stack_env_cfg import StackEnvCfg + +## +# Pre-defined configs +## +from isaaclab.markers.config import FRAME_MARKER_CFG # isort: skip +from isaaclab_assets.robots.franka import FRANKA_PANDA_CFG # isort: skip + + +@configclass +class EventCfg: + """Configuration for events.""" + + init_franka_arm_pose = EventTerm( + func=franka_stack_events.set_default_joint_pose, + # mode="startup", + mode="reset", + params={ + "default_pose": [0.0444, -0.1894, -0.1107, -2.5148, 0.0044, 2.3775, 0.6952, 0.0400, 0.0400], + }, + ) + + randomize_franka_joint_state = EventTerm( + func=franka_stack_events.randomize_joint_by_gaussian_offset, + mode="reset", + params={ + "mean": 0.0, + "std": 0.02, + "asset_cfg": SceneEntityCfg("robot"), + }, + ) + + # Reset blue bin position + reset_blue_bin_pose = EventTerm( + func=franka_stack_events.randomize_object_pose, + mode="reset", + params={ + # Keep bin at fixed position - no randomization + "pose_range": {"x": (0.4, 0.4), "y": (0.0, 0.0), "z": (0.0203, 0.0203), "yaw": (0.0, 0.0)}, + "min_separation": 0.0, + "asset_cfgs": [SceneEntityCfg("blue_sorting_bin")], + }, + ) + + # Reset cube 1 to initial position (inside the bin) + reset_cube_1_pose = EventTerm( + func=franka_stack_events.randomize_object_pose, + mode="reset", + params={ + "pose_range": {"x": (0.4, 0.4), "y": (0.0, 0.0), "z": (0.0203, 0.0203), "yaw": (0.0, 0.0)}, + "min_separation": 0.0, + "asset_cfgs": [SceneEntityCfg("cube_1")], + }, + ) + + # Reset cube 2 and 3 to initial position (outside the bin, to the left and right) + reset_cube_pose = EventTerm( + func=franka_stack_events.randomize_object_pose, + mode="reset", + params={ + "pose_range": {"x": (0.65, 0.70), "y": (-0.18, 0.18), "z": (0.0203, 0.0203), "yaw": (-1.0, 1.0, 0)}, + "min_separation": 0.1, + "asset_cfgs": [SceneEntityCfg("cube_2"), SceneEntityCfg("cube_3")], + }, + ) + + +@configclass +class FrankaBinStackEnvCfg(StackEnvCfg): + def __post_init__(self): + # post init of parent + super().__post_init__() + + # Set events + self.events = EventCfg() + + # Set Franka as robot + self.scene.robot = FRANKA_PANDA_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + self.scene.robot.spawn.semantic_tags = [("class", "robot")] + + # Add semantics to table + self.scene.table.spawn.semantic_tags = [("class", "table")] + + # Add semantics to ground + self.scene.plane.semantic_tags = [("class", "ground")] + + # Set actions for the specific robot type (franka) + self.actions.arm_action = mdp.JointPositionActionCfg( + asset_name="robot", joint_names=["panda_joint.*"], scale=0.5, use_default_offset=True + ) + self.actions.gripper_action = mdp.BinaryJointPositionActionCfg( + asset_name="robot", + joint_names=["panda_finger.*"], + 
open_command_expr={"panda_finger_.*": 0.04}, + close_command_expr={"panda_finger_.*": 0.0}, + ) + + # Rigid body properties of each cube + cube_properties = RigidBodyPropertiesCfg( + solver_position_iteration_count=40, + solver_velocity_iteration_count=1, + max_angular_velocity=1000.0, + max_linear_velocity=1000.0, + max_depenetration_velocity=5.0, + disable_gravity=False, + ) + + # Blue sorting bin positioned at table center + self.scene.blue_sorting_bin = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/BlueSortingBin", + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.4, 0.0, 0.0203), rot=(1.0, 0.0, 0.0, 0.0)), + spawn=UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/nut_pour_task/nut_pour_assets/sorting_bin_blue.usd", + scale=(1.1, 1.6, 3.3), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + ), + ) + + # Cube 1 positioned at the bottom center of the blue bin + # The bin is at (0.4, 0.0, 0.0203), so cube_1 should be slightly above it + self.scene.cube_1 = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube_1", + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.4, 0.0, 0.025), rot=(1.0, 0.0, 0.0, 0.0)), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/blue_block.usd", + scale=(1.0, 1.0, 1.0), + rigid_props=cube_properties, + ), + ) + + # Cube 2 positioned outside the bin (to the right) + self.scene.cube_2 = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube_2", + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.85, 0.25, 0.0203), rot=(1.0, 0.0, 0.0, 0.0)), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/red_block.usd", + scale=(1.0, 1.0, 1.0), + rigid_props=cube_properties, + ), + ) + + # Cube 3 positioned outside the bin (to the left) + self.scene.cube_3 = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube_3", + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.85, -0.25, 0.0203), rot=(1.0, 0.0, 0.0, 0.0)), + spawn=UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/green_block.usd", + scale=(1.0, 1.0, 1.0), + rigid_props=cube_properties, + ), + ) + + # Listens to the required transforms + marker_cfg = FRAME_MARKER_CFG.copy() + marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1) + marker_cfg.prim_path = "/Visuals/FrameTransformer" + self.scene.ee_frame = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/panda_link0", + debug_vis=False, + visualizer_cfg=marker_cfg, + target_frames=[ + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/panda_hand", + name="end_effector", + offset=OffsetCfg( + pos=(0.0, 0.0, 0.0), + ), + ), + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/panda_rightfinger", + name="tool_rightfinger", + offset=OffsetCfg( + pos=(0.0, 0.0, 0.046), + ), + ), + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/panda_leftfinger", + name="tool_leftfinger", + offset=OffsetCfg( + pos=(0.0, 0.0, 0.046), + ), + ), + ], + ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py index 502f057d4a3..2a65c44eb19 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py @@ -3,6 +3,8 @@ # # SPDX-License-Identifier: BSD-3-Clause +import os + from isaaclab.assets import RigidObjectCfg from isaaclab.managers import EventTermCfg as EventTerm from 
isaaclab.managers import SceneEntityCfg @@ -87,6 +89,15 @@ def __post_init__(self): close_command_expr={"panda_finger_.*": 0.0}, ) + # Apply skillgen-specific cube position randomization if enabled + if os.getenv("ISAACLAB_USE_SKILLGEN") == "1": + self.events.randomize_cube_positions.params["pose_range"] = { + "x": (0.45, 0.6), + "y": (-0.23, 0.23), + "z": (0.0203, 0.0203), + "yaw": (-1.0, 1, 0), + } + # Rigid body properties of each cube cube_properties = RigidBodyPropertiesCfg( solver_position_iteration_count=16, @@ -142,7 +153,7 @@ def __post_init__(self): prim_path="{ENV_REGEX_NS}/Robot/panda_hand", name="end_effector", offset=OffsetCfg( - pos=[0.0, 0.0, 0.1034], + pos=[0.0, 0.0, 0.0] if os.getenv("ISAACLAB_USE_SKILLGEN") == "1" else [0.0, 0.0, 0.1034], ), ), FrameTransformerCfg.FrameCfg( From 7acaa436a802d6c23d0c654b94f61c17d9e678a8 Mon Sep 17 00:00:00 2001 From: Neel Jawale Date: Wed, 20 Aug 2025 13:15:55 -0700 Subject: [PATCH 2/6] A minor rerun related code update --- .../motion_planners/curobo/plan_visualizer.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py index 44eb0a94228..b231e621434 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py @@ -398,7 +398,7 @@ def visualize_plan( ) # Set timeline for static visualization (separate from animation) - rr.set_time_sequence("static_plan", self._current_frame) + rr.set_time("static_plan", sequence=self._current_frame) self._current_frame += 1 # Clear previous visualization of dynamic entities (keep static meshes) @@ -709,7 +709,7 @@ def animate_plan( # Iterate over timesteps and log a frame on the chosen timeline for idx, pos in enumerate(ee_positions): # Set time on the chosen timeline (creates it if needed) - rr.set_time_sequence(timeline, idx) + rr.set_time(timeline, sequence=idx) # Log end-effector marker (needs offset for multi-env) rr.log( @@ -803,7 +803,7 @@ def animate_spheres_along_path( # Animate spheres along the interpolated trajectory for frame_idx, joint_positions in enumerate(interpolated_positions): # Set time on the animation timeline with consistent timing - rr.set_time_sequence(timeline, frame_idx) + rr.set_time(timeline, sequence=frame_idx) # Create joint state for this interpolated position if isinstance(joint_positions, torch.Tensor): @@ -909,7 +909,9 @@ def _create_interpolated_trajectory(self, plan: JointState, interpolation_steps: """ if len(plan.position) < 2: # If only one waypoint, just return it - return [plan.position[0] if isinstance(plan.position[0], torch.Tensor) else torch.tensor(plan.position[0])] + return [ + plan.position[0] if isinstance(plan.position[0], torch.Tensor) else torch.tensor(plan.position[0]) + ] # type: ignore interpolated_positions = [] From cf32f5e4adb5ec0105d00c136d0dfdfc7f0c9891 Mon Sep 17 00:00:00 2001 From: Neel Jawale Date: Tue, 26 Aug 2025 20:44:48 -0700 Subject: [PATCH 3/6] Updated cuRobo and isaac sim licenses. Fixed rerun path viz bugs. Minor changes in cuRobo. Added urdf extraction and on-the-fly config creation from nucleus. 
--- README.md | 4 + docs/licenses/dependencies/cuRobo-license.txt | 93 +++++++ .../dependencies/isaacsim-license.txt | 193 ++++++++++++- pyproject.toml | 2 +- .../motion_planners/curobo/curobo_planner.py | 66 ++--- .../curobo/curobo_planner_config.py | 260 ++++++++++++++---- .../motion_planners/curobo/plan_visualizer.py | 68 ++--- 7 files changed, 522 insertions(+), 164 deletions(-) create mode 100644 docs/licenses/dependencies/cuRobo-license.txt diff --git a/README.md b/README.md index 1a430cae71e..19b157fa82d 100644 --- a/README.md +++ b/README.md @@ -188,6 +188,10 @@ where creativity and technology intersect. Your contributions can make a meaning The Isaac Lab framework is released under [BSD-3 License](LICENSE). The `isaaclab_mimic` extension and its corresponding standalone scripts are released under [Apache 2.0](LICENSE-mimic). The license files of its dependencies and assets are present in the [`docs/licenses`](docs/licenses) directory. +Note that Isaac Lab requires Isaac Sim, which includes components under proprietary licensing terms. Please see the [Isaac Sim license](https://github.com/isaac-sim/IsaacSim/blob/main/LICENSE) for information on Isaac Sim licensing. + +Note that the `isaaclab_mimic` extension requires cuRobo, which has proprietary licensing terms that can be found in [`docs/licenses/dependencies/cuRobo-license.txt`](docs/licenses/dependencies/cuRobo-license.txt). + ## Acknowledgement Isaac Lab development initiated from the [Orbit](https://isaac-orbit.github.io/) framework. We would appreciate if you would cite it in academic publications as well: diff --git a/docs/licenses/dependencies/cuRobo-license.txt b/docs/licenses/dependencies/cuRobo-license.txt new file mode 100644 index 00000000000..70b7c6fb67b --- /dev/null +++ b/docs/licenses/dependencies/cuRobo-license.txt @@ -0,0 +1,93 @@ +NVIDIA ISAAC LAB ADDITIONAL SOFTWARE AND MATERIALS LICENSE + +IMPORTANT NOTICE – PLEASE READ AND AGREE BEFORE USING THE SOFTWARE + +This software license agreement ("Agreement") is a legal agreement between you, whether an individual or entity, ("you") and NVIDIA Corporation ("NVIDIA") and governs the use of the NVIDIA cuRobo and related software and materials that NVIDIA delivers to you under this Agreement ("Software"). NVIDIA and you are each a "party" and collectively the "parties." + +By using the Software, you are affirming that you have read and agree to this Agreement. + +If you don't accept all the terms and conditions below, do not use the Software. + +1. License Grant. The Software made available by NVIDIA to you is licensed, not sold. Subject to the terms of this Agreement, NVIDIA grants you a limited, non-exclusive, revocable, non-transferable, and non-sublicensable (except as expressly granted in this Agreement), license to install and use copies of the Software together with NVIDIA Isaac Lab in systems with NVIDIA GPUs ("Purpose"). + +2. License Restrictions. Your license to use the Software is restricted as stated in this Section 2 ("License Restrictions"). You will cooperate with NVIDIA and, upon NVIDIA's written request, you will confirm in writing and provide reasonably requested information to verify your compliance with the terms of this Agreement. 
You may not: + +2.1 Use the Software for any purpose other than the Purpose, and for clarity use of NVIDIA cuRobo apart from use with Isaac Lab is outside of the Purpose; + +2.2 Sell, rent, sublicense, transfer, distribute or otherwise make available to others (except authorized users as stated in Section 3 ("Authorized Users")) any portion of the Software, except as expressly granted in Section 1 ("License Grant"); + +2.3 Reverse engineer, decompile, or disassemble the Software components provided in binary form, nor attempt in any other manner to obtain source code of such Software; + +2.4 Modify or create derivative works of the Software; + +2.5 Change or remove copyright or other proprietary notices in the Software; + +2.6 Bypass, disable, or circumvent any technical limitation, encryption, security, digital rights management or authentication mechanism in the Software; + +2.7 Use the Software in any manner that would cause them to become subject to an open source software license, subject to the terms in Section 7 ("Components Under Other Licenses"); or + +2.8 Use the Software in violation of any applicable law or regulation in relevant jurisdictions. + +3. Authorized Users. You may allow employees and contractors of your entity or of your subsidiary(ies), and for educational institutions also enrolled students, to internally access and use the Software as authorized by this Agreement from your secure network to perform the work authorized by this Agreement on your behalf. You are responsible for the compliance with the terms of this Agreement by your authorized users. Any act or omission that if committed by you would constitute a breach of this Agreement will be deemed to constitute a breach of this Agreement if committed by your authorized users. + +4. Pre-Release. Software versions identified as alpha, beta, preview, early access or otherwise as pre-release ("Pre-Release") may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability and reliability standards relative to NVIDIA commercial offerings. You use Pre-Release Software at your own risk. NVIDIA did not design or test the Software for use in production or business-critical systems. NVIDIA may choose not to make available a commercial version of Pre-Release Software. NVIDIA may also choose to abandon development and terminate the availability of Pre-Release Software at any time without liability. + +5. Updates. NVIDIA may at any time and at its option, change, discontinue, or deprecate any part, or all, of the Software, or change or remove features or functionality, or make available patches, workarounds or other updates to the Software. Unless the updates are provided with their separate governing terms, they are deemed part of the Software licensed to you under this Agreement, and your continued use of the Software is deemed acceptance of such changes. + +6. Components Under Other Licenses. The Software may include or be distributed with components provided with separate legal notices or terms that accompany the components, such as open source software licenses and other license terms ("Other Licenses"). The components are subject to the applicable Other Licenses, including any proprietary notices, disclaimers, requirements and extended use rights; except that this Agreement will prevail regarding the use of third-party open source software, unless a third-party open source software license requires its license terms to prevail. 
Open source software license means any software, data or documentation subject to any license identified as an open source license by the Open Source Initiative (http://opensource.org), Free Software Foundation (http://www.fsf.org) or other similar open source organization or listed by the Software Package Data Exchange (SPDX) Workgroup under the Linux Foundation (http://www.spdx.org). + +7. Ownership. The Software, including all intellectual property rights, is and will remain the sole and exclusive property of NVIDIA or its licensors. Except as expressly granted in this Agreement, (a) NVIDIA reserves all rights, interests and remedies in connection with the Software, and (b) no other license or right is granted to you by implication, estoppel or otherwise. + +8. Feedback. You may, but you are not obligated to, provide suggestions, requests, fixes, modifications, enhancements, or other feedback regarding the Software (collectively, "Feedback"). Feedback, even if designated as confidential by you, will not create any confidentiality obligation for NVIDIA or its affiliates. If you provide Feedback, you grant NVIDIA, its affiliates and its designees a non-exclusive, perpetual, irrevocable, sublicensable, worldwide, royalty-free, fully paid-up and transferable license, under your intellectual property rights, to publicly perform, publicly display, reproduce, use, make, have made, sell, offer for sale, distribute (through multiple tiers of distribution), import, create derivative works of and otherwise commercialize and exploit the Feedback at NVIDIA's discretion. + +9. Term and Termination. + +9.1 Term and Termination for Convenience. This license ends by July 31, 2026 or earlier at your choice if you finished using the Software for the Purpose. Either party may terminate this Agreement at any time with thirty (30) days' advance written notice to the other party. + +9.2 Termination for Cause. If you commence or participate in any legal proceeding against NVIDIA with respect to the Software, this Agreement will terminate immediately without notice. Either party may terminate this Agreement for cause if: + +(a) The other party fails to cure a material breach of this Agreement within ten (10) days of the non-breaching party's written notice of the breach; or + +(b) the other party breaches its confidentiality obligations or license rights under this Agreement, which termination will be effective immediately upon written notice. + +9.3 Effect of Termination. Upon any expiration or termination of this Agreement, you will promptly stop using and return, delete or destroy NVIDIA confidential information and all Software received under this Agreement. Upon written request, you will certify in writing that you have complied with your obligations under this Section 9.3 ("Effect of Termination"). + +9.4 Survival. Section 5 ("Updates"), Section 6 ("Components Under Other Licenses"), Section 7 ("Ownership"), Section 8 ("Feedback"), Section 9.3 ("Effect of Termination"), Section 9.4 ("Survival"), Section 10 ("Disclaimer of Warranties"), Section 11 ("Limitation of Liability"), Section 12 ("Use in Mission Critical Applications"), Section 13 ("Governing Law and Jurisdiction") and Section 14 ("General") will survive any expiration or termination of this Agreement. + +10. Disclaimer of Warranties. THE SOFTWARE IS PROVIDED BY NVIDIA AS-IS AND WITH ALL FAULTS. 
TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, NVIDIA DISCLAIMS ALL WARRANTIES AND REPRESENTATIONS OF ANY KIND, WHETHER EXPRESS, IMPLIED OR STATUTORY, RELATING TO OR ARISING UNDER THIS AGREEMENT, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF TITLE, NONINFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, USAGE OF TRADE AND COURSE OF DEALING. NVIDIA DOES NOT WARRANT OR ASSUME RESPONSIBILITY FOR THE ACCURACY OR COMPLETENESS OF ANY THIRD-PARTY INFORMATION, TEXT, GRAPHICS, LINKS CONTAINED IN THE SOFTWARE. WITHOUT LIMITING THE FOREGOING, NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS, ANY DEFECTS OR ERRORS WILL BE CORRECTED, ANY CERTAIN CONTENT WILL BE AVAILABLE; OR THAT THE SOFTWARE IS FREE OF VIRUSES OR OTHER HARMFUL COMPONENTS. NO INFORMATION OR ADVICE GIVEN BY NVIDIA WILL IN ANY WAY INCREASE THE SCOPE OF ANY WARRANTY EXPRESSLY PROVIDED IN THIS AGREEMENT. + +11. Limitations of Liability. + +11.1 EXCLUSIONS. TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL NVIDIA BE LIABLE FOR ANY (I) INDIRECT, PUNITIVE, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES, OR (II) DAMAGES FOR (A) THE COST OF PROCURING SUBSTITUTE GOODS, OR (B) LOSS OF PROFITS, REVENUES, USE, DATA OR GOODWILL ARISING OUT OF OR RELATED TO THIS AGREEMENT, WHETHER BASED ON BREACH OF CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY, OR OTHERWISE, AND EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES AND EVEN IF A PARTY'S REMEDIES FAIL THEIR ESSENTIAL PURPOSE. + +11.2 DAMAGES CAP. ADDITIONALLY, TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, NVIDIA'S TOTAL CUMULATIVE AGGREGATE LIABILITY FOR ANY AND ALL LIABILITIES, OBLIGATIONS OR CLAIMS ARISING OUT OF OR RELATED TO THIS AGREEMENT WILL NOT EXCEED FIVE U.S. DOLLARS (US$5). + +12. Use in Mission Critical Applications. You acknowledge that the Software provided under this Agreement is not designed or tested by NVIDIA for use in any system or application where the use or failure of such system or application developed with NVIDIA's Software could result in injury, death or catastrophic damage (each, a "Mission Critical Application"). Examples of Mission Critical Applications include use in avionics, navigation, autonomous vehicle applications, AI solutions for automotive products, military, medical, life support or other mission-critical or life-critical applications. NVIDIA will not be liable to you or any third party, in whole or in part, for any claims or damages arising from these uses. You are solely responsible for ensuring that systems and applications developed with the Software include sufficient safety and redundancy features and comply with all applicable legal and regulatory standards and requirements. + +13. Governing Law and Jurisdiction. This Agreement will be governed in all respects by the laws of the United States and the laws of the State of Delaware, without regard to conflict of laws principles or the United Nations Convention on Contracts for the International Sale of Goods. The state and federal courts residing in Santa Clara County, California will have exclusive jurisdiction over any dispute or claim arising out of or related to this Agreement, and the parties irrevocably consent to personal jurisdiction and venue in those courts; except that either party may apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + +14. General. + +14.1 Indemnity. 
By using the Software you agree to defend, indemnify and hold harmless NVIDIA and its affiliates and their respective officers, directors, employees and agents from and against any claims, disputes, demands, liabilities, damages, losses, costs and expenses arising out of or in any way connected with (i) products or services that have been developed or deployed with or use the Software, or claims that they violate laws, or infringe, violate, or misappropriate any third party right; or (ii) your use of the Software in breach of the terms of this Agreement. + +14.2 Independent Contractors. The parties are independent contractors, and this Agreement does not create a joint venture, partnership, agency, or other form of business association between the parties. Neither party will have the power to bind the other party or incur any obligation on its behalf without the other party's prior written consent. Nothing in this Agreement prevents either party from participating in similar arrangements with third parties. + +14.3 No Assignment. NVIDIA may assign, delegate or transfer its rights or obligations under this Agreement by any means or operation of law. You may not, without NVIDIA's prior written consent, assign, delegate or transfer any of your rights or obligations under this Agreement by any means or operation of law, and any attempt to do so is null and void. + +14.4 No Waiver. No failure or delay by a party to enforce any term or obligation of this Agreement will operate as a waiver by that party, or prevent the enforcement of such term or obligation later. + +14.5 Trade Compliance. You agree to comply with all applicable export, import, trade and economic sanctions laws and regulations, as amended, including without limitation U.S. Export Administration Regulations and Office of Foreign Assets Control regulations. You confirm (a) your understanding that export or reexport of certain NVIDIA products or technologies may require a license or other approval from appropriate authorities and (b) that you will not export or reexport any products or technology, directly or indirectly, without first obtaining any required license or other approval from appropriate authorities, (i) to any countries that are subject to any U.S. or local export restrictions (currently including, but not necessarily limited to, Belarus, Cuba, Iran, North Korea, Russia, Syria, the Region of Crimea, Donetsk People's Republic Region and Luhansk People's Republic Region); (ii) to any end-user who you know or have reason to know will utilize them in the design, development or production of nuclear, chemical or biological weapons, missiles, rocket systems, unmanned air vehicles capable of a maximum range of at least 300 kilometers, regardless of payload, or intended for military end-use, or any weapons of mass destruction; (iii) to any end-user who has been prohibited from participating in the U.S. or local export transactions by any governing authority; or (iv) to any known military or military-intelligence end-user or for any known military or military-intelligence end-use in accordance with U.S. trade compliance laws and regulations. + +14.6 Government Rights. The Software, documentation and technology ("Protected Items") are "Commercial products" as this term is defined at 48 C.F.R. 2.101, consisting of "commercial computer software" and "commercial computer software documentation" as such terms are used in, respectively, 48 C.F.R. 12.212 and 48 C.F.R. 227.7202 & 252.227-7014(a)(1). 
Before any Protected Items are supplied to the U.S. Government, you will (i) inform the U.S. Government in writing that the Protected Items are and must be treated as commercial computer software and commercial computer software documentation developed at private expense; (ii) inform the U.S. Government that the Protected Items are provided subject to the terms of the Agreement; and (iii) mark the Protected Items as commercial computer software and commercial computer software documentation developed at private expense. In no event will you permit the U.S. Government to acquire rights in Protected Items beyond those specified in 48 C.F.R. 52.227-19(b)(1)-(2) or 252.227-7013(c) except as expressly approved by NVIDIA in writing. + +14.7 Notices. Please direct your legal notices or other correspondence to legalnotices@nvidia.com with a copy mailed to NVIDIA Corporation, 2788 San Tomas Expressway, Santa Clara, California 95051, United States of America, Attention: Legal Department. If NVIDIA needs to contact you, you consent to receive the notices by email and agree that such notices will satisfy any legal communication requirements. + +14.8 Severability. If a court of competent jurisdiction rules that a provision of this Agreement is unenforceable, that provision will be deemed modified to the extent necessary to make it enforceable and the remainder of this Agreement will continue in full force and effect. + +14.9 Construction. The headings in the Agreement are included solely for convenience and are not intended to affect the meaning or interpretation of the Agreement. As required by the context of the Agreement, the singular of a term includes the plural and vice versa. + +14.10 Amendment. Any amendment to this Agreement must be in writing and signed by authorized representatives of both parties. + +14.11 Entire Agreement. Regarding the subject matter of this Agreement, the parties agree that (a) this Agreement constitutes the entire and exclusive agreement between the parties and supersedes all prior and contemporaneous communications and (b) any additional or different terms or conditions, whether contained in purchase orders, order acknowledgments, invoices or otherwise, will not be binding and are null and void. + +(v. August 15, 2025) \ No newline at end of file diff --git a/docs/licenses/dependencies/isaacsim-license.txt b/docs/licenses/dependencies/isaacsim-license.txt index 80cff4a4514..0454ece1bed 100644 --- a/docs/licenses/dependencies/isaacsim-license.txt +++ b/docs/licenses/dependencies/isaacsim-license.txt @@ -1,13 +1,188 @@ -Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +The Isaac Sim software in this repository is covered under the Apache 2.0 +License terms below. -NVIDIA CORPORATION and its licensors retain all intellectual property -and proprietary rights in and to this software, related documentation -and any modifications thereto. Any use, reproduction, disclosure or -distribution of this software and related documentation without an express -license agreement from NVIDIA CORPORATION is strictly prohibited. +Building or using the software requires additional components licenced +under other terms. These additional components include dependencies such +as the Omniverse Kit SDK, as well as 3D models and textures. -Note: Licenses for assets such as Robots and Props used within these environments can be found inside their respective folders on the Nucleus server where they are hosted. 
+License terms for these additional NVIDIA owned and licensed components +can be found here: + https://docs.nvidia.com/NVIDIA-IsaacSim-Additional-Software-and-Materials-License.pdf -For more information: https://docs.omniverse.nvidia.com/app_isaacsim/common/NVIDIA_Omniverse_License_Agreement.html +Any open source dependencies downloaded during the build process are owned +by their respective owners and licensed under their respective terms. -For sub-dependencies of Isaac Sim: https://docs.omniverse.nvidia.com/app_isaacsim/common/licenses.html + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
diff --git a/pyproject.toml b/pyproject.toml index beedbd16a9c..aa5574018eb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -95,6 +95,6 @@ reportPrivateUsage = "warning" skip = '*.usd,*.svg,*.png,_isaac_sim*,*.bib,*.css,*/_build' quiet-level = 0 # the world list should always have words in lower case -ignore-words-list = "haa,slq,collapsable,buss" +ignore-words-list = "haa,slq,collapsable,buss,reacher" # todo: this is hack to deal with incorrect spelling of "Environment" in the Isaac Sim grid world asset exclude-file = "source/isaaclab/isaaclab/sim/spawners/from_files/from_files.py" diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py index a026b6f033f..61d4af9c3bb 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py @@ -208,8 +208,6 @@ def __init__( # Only recompute when objects are added/removed, not when poses change self._cached_object_mappings: dict[str, str] | None = None - self.counter = 0 - # ===================================================================================== # DEVICE CONVERSION UTILITIES # ===================================================================================== @@ -469,6 +467,9 @@ def update_world(self) -> None: if self.visualize_spheres: self._update_sphere_visualization(force_update=True) + if torch.cuda.is_available(): + torch.cuda.synchronize() + def _get_world_object_names(self) -> list[str]: """Extract all object names from cuRobo's collision world model. @@ -1045,7 +1046,6 @@ def plan_motion( # Compute end-effector positions for visualization ee_positions_list = [] - finger_positions_list = [] try: for i in range(len(self._current_plan.position)): js: JointState = self._current_plan[i] @@ -1053,20 +1053,6 @@ def plan_motion( ee_pos = kin.ee_position if hasattr(kin, "ee_position") else kin.ee_pose.position ee_positions_list.append(ee_pos.cpu().numpy().squeeze()) - # Path of one finger link for collision-checking visual - q = js.position - if isinstance(q, torch.Tensor): - if q.dim() == 1: - q = q.unsqueeze(0) - link_state = self.motion_gen.kinematics.get_state(q) - names = link_state.link_names - # Use first hand link name from config for finger visualization - finger_link_name = self.config.hand_link_names[0] if self.config.hand_link_names else None - if finger_link_name and finger_link_name in names: - idx = names.index(finger_link_name) - lf = link_state.links_position[0, idx, :].cpu().numpy() - finger_positions_list.append(lf) - if self.debug and len(ee_positions_list) > 0: print("Link names from kinematics:", kin.link_names) @@ -1074,15 +1060,9 @@ def plan_motion( if self.debug: print(f"Failed to compute EE positions for visualization: {e}") ee_positions_list = None - finger_positions_list = None try: - if self.counter == 0: - import time - - time.sleep(5) world_scene = WorldConfig.get_scene_graph(self.motion_gen.world_coll_checker.world_model) - self.counter += 1 except Exception: world_scene = None @@ -1093,7 +1073,6 @@ def plan_motion( robot_spheres=robot_spheres, attached_spheres=attached_spheres, ee_positions=np.array(ee_positions_list) if ee_positions_list else None, - finger_positions=np.array(finger_positions_list) if finger_positions_list else None, frame_duration=0.1, world_scene=world_scene, ) @@ -1119,8 +1098,6 @@ def _plan_to_contact_pose( start_state: JointState, goal_pose: Pose, 
contact: bool = True, - retime_plan: bool = False, - step_size: float | None = None, ) -> bool: """Plan motion with configurable collision checking for contact scenarios. @@ -1198,12 +1175,6 @@ def _plan_to_contact_pose( self._current_plan = self._current_plan.get_ordered_joint_state(common_js_names) self._plan_index = 0 - if retime_plan and step_size is not None: - original_length: int = len(self._current_plan.position) - self._current_plan = self._linearly_retime_plan(step_size=step_size, plan=self._current_plan) - if self.debug: - print(f"Retimed plan from {original_length} to {len(self._current_plan.position)} waypoints") - planning_success = True if self.debug: print(f"Contact planning succeeded with {len(self._current_plan.position)} waypoints") @@ -1310,8 +1281,6 @@ def _plan_to_contact( start_state=current_state, goal_pose=target_pose, contact=contact_flag, - retime_plan=False, - step_size=None, ) if not success: @@ -1351,7 +1320,6 @@ def _linearly_retime_plan( self, step_size: float = 0.01, plan: JointState | None = None, - n_repeat: int | None = None, ) -> JointState | None: """Apply linear retiming to trajectory for consistent execution speed. @@ -1407,7 +1375,7 @@ def _linearly_retime_plan( # Linear interpolation indices = torch.searchsorted(cum_distances, sampled_distances) - indices = torch.clamp(indices, 0, len(cum_distances) - 1) + indices = torch.clamp(indices, 1, len(cum_distances) - 1) # Get interpolation weights weights = (sampled_distances - cum_distances[indices - 1]) / ( @@ -1476,6 +1444,9 @@ def reset_plan(self) -> None: """ self._plan_index = 0 self._current_plan = None + if self.visualize_plan and hasattr(self, "plan_visualizer"): + self.plan_visualizer._clear_visualization() + self.plan_visualizer.mark_idle() def get_planned_poses(self) -> list[torch.Tensor]: """Extract all end-effector poses from current trajectory. 
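The lower clamp in the retiming hunk above changes from 0 to 1 to fix an off-by-one: torch.searchsorted returns index 0 for a sample at or below the first cumulative distance, and the weight computation then reads cum_distances[indices - 1], which wraps around to the last element when indices is 0. A minimal, self-contained sketch of the same arc-length retiming logic (the retime_linear name, tensor shapes, and the epsilon guard against zero-length segments are illustrative, not the planner's actual API):

import torch

def retime_linear(positions: torch.Tensor, step_size: float) -> torch.Tensor:
    # positions: (T, D) joint waypoints; resample at uniform joint-space arc length.
    seg_lengths = torch.norm(positions[1:] - positions[:-1], dim=1)
    cum_distances = torch.cat([positions.new_zeros(1), torch.cumsum(seg_lengths, dim=0)])
    sampled = torch.arange(0.0, float(cum_distances[-1]), step_size, device=positions.device)
    indices = torch.searchsorted(cum_distances, sampled)
    # Clamp the lower bound to 1, not 0: a sample at distance 0 maps to index 0,
    # and indices - 1 would then index the last element instead of the first segment.
    indices = torch.clamp(indices, 1, len(cum_distances) - 1)
    weights = (sampled - cum_distances[indices - 1]) / (
        cum_distances[indices] - cum_distances[indices - 1] + 1e-9
    )
    return positions[indices - 1] + weights.unsqueeze(1) * (positions[indices] - positions[indices - 1])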
@@ -1672,12 +1643,11 @@ def _create_sphere_config(self, sphere_idx: int, sphere, robot_link_count: int) from isaaclab.sim.spawners.meshes import MeshSphereCfg is_attached = sphere_idx >= robot_link_count - color = (0.8, 0.2, 0.0) if is_attached else (0.0, 0.8, 0.2) + color = (1.0, 0.5, 0.0) if is_attached else (0.0, 1.0, 0.0) opacity = 0.9 if is_attached else 0.5 - # Calculate position - env_origin = self.env.scene.env_origins[self.env_id, :3] - root_translation = (self.robot.data.root_pos_w[self.env_id, :3] - env_origin).detach().cpu().numpy() + # Calculate position in world frame (do not use env_origin) + root_translation = (self.robot.data.root_pos_w[self.env_id, :3]).detach().cpu().numpy() position = sphere.position.cpu().numpy() if hasattr(sphere.position, "cpu") else sphere.position if not is_attached: position = position + root_translation @@ -1685,7 +1655,8 @@ def _create_sphere_config(self, sphere_idx: int, sphere, robot_link_count: int) return { "position": position, "cfg": MeshSphereCfg( - radius=float(sphere.radius), visual_material=PreviewSurfaceCfg(diffuse_color=color, opacity=opacity) + radius=float(sphere.radius), + visual_material=PreviewSurfaceCfg(diffuse_color=color, opacity=opacity, emissive_color=color), ), } @@ -1762,6 +1733,7 @@ def update_world_and_plan_motion( gripper_closed = expected_attached_object is not None self._set_gripper_state(gripper_closed) current_attached = self.get_attached_objects() + gripper_pos = self.robot.data.joint_pos[env_id, -2:] if self.debug: print(f"Current attached objects: {current_attached}") @@ -1801,8 +1773,6 @@ def update_world_and_plan_motion( print(f"Distance EE to object: {distance:.4f}") # Debug gripper state - robot = self.env.scene["robot"] - gripper_pos = robot.data.joint_pos[env_id, -2:] gripper_open_val = self.config.grasp_gripper_open_val print(f"Gripper positions: {gripper_pos}") print(f"Gripper open val: {gripper_open_val}") @@ -1813,14 +1783,14 @@ def update_world_and_plan_motion( print(f"Is grasped check result: {is_grasped}") if is_grasped: - if self.debug: - print(f"Attaching {expected_attached_object}") self._attach_object(expected_attached_object, expected_path, env_id) - print(f"Attached {expected_attached_object}") + if self.debug: + print(f"Attached {expected_attached_object}") else: if self.debug: - print("Object not detected as grasped - attachment skipped") - print("This will cause collision with ghost object!") + print( + "Object not detected as grasped - attachment skipped" + ) # This will cause collision with ghost object! 
else: if self.debug: print(f"Object {expected_attached_object} not found in world mappings") diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py index 3b0babbabf0..a3a0ee9061f 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py @@ -3,43 +3,61 @@ # # SPDX-License-Identifier: Apache-2.0 +import os +import tempfile +import yaml from dataclasses import dataclass, field from curobo.geom.sdf.world import CollisionCheckerType from curobo.geom.types import WorldConfig -from curobo.util_file import get_world_configs_path, join_path, load_yaml +from curobo.util_file import get_robot_configs_path, get_world_configs_path, join_path, load_yaml + +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path @dataclass class CuroboPlannerConfig: - """Configuration for CuRobo motion planner.""" + """Configuration for CuRobo motion planner. + + This dataclass provides a flexible configuration system for the CuRobo motion planner. + The base configuration is robot-agnostic, with factory methods providing pre-configured + settings for specific robots and tasks. + + Example Usage: + >>> # Use a pre-configured robot + >>> config = CuroboPlannerConfig.franka_config() + >>> + >>> # Or create from task name + >>> config = CuroboPlannerConfig.from_task_name("Isaac-Stack-Cube-Franka-v0") + >>> + >>> # Initialize planner with config + >>> planner = CuroboPlanner(env, robot, config) + + To add support for a new robot, see the factory methods section below for detailed instructions. + """ # Robot configuration - robot_config_file: str = "franka.yml" + robot_config_file: str | None = None """cuRobo robot configuration file (path defined by curobo api).""" - robot_name: str = "franka" + robot_name: str = "" """Robot name for visualization and identification.""" ee_link_name: str | None = None """End-effector link name (auto-detected from robot config if None).""" # Gripper configuration - gripper_joint_names: list[str] = field(default_factory=lambda: ["panda_finger_joint1", "panda_finger_joint2"]) + gripper_joint_names: list[str] = field(default_factory=list) """Names of gripper joints.""" - gripper_open_positions: dict[str, float] = field( - default_factory=lambda: {"panda_finger_joint1": 0.04, "panda_finger_joint2": 0.04} - ) + gripper_open_positions: dict[str, float] = field(default_factory=dict) """Open gripper positions for cuRobo to update spheres""" - gripper_closed_positions: dict[str, float] = field( - default_factory=lambda: {"panda_finger_joint1": 0.008, "panda_finger_joint2": 0.008} - ) + gripper_closed_positions: dict[str, float] = field(default_factory=dict) """Closed gripper positions for cuRobo to update spheres""" # Hand link configuration (for contact planning) - hand_link_names: list[str] = field(default_factory=lambda: ["panda_leftfinger", "panda_rightfinger", "panda_hand"]) + hand_link_names: list[str] = field(default_factory=list) """Names of hand/finger links to disable during contact planning.""" # Attachment configuration @@ -160,10 +178,9 @@ def _get_world_config_with_table_adjustment(self) -> WorldConfig: # Load the base world config world_cfg_table = WorldConfig.from_dict(load_yaml(join_path(get_world_configs_path(), self.world_config_file))) - # Adjust table height if cuboid exists - if world_cfg_table.cuboid is 
not None: - if len(world_cfg_table.cuboid) > 0: - world_cfg_table.cuboid[0].pose[2] -= 0.02 + # Adjust table height if cuboid exists and has a pose + if world_cfg_table.cuboid and len(world_cfg_table.cuboid) > 0 and world_cfg_table.cuboid[0].pose: + world_cfg_table.cuboid[0].pose[2] -= 0.02 # Get mesh world for additional collision objects world_cfg_mesh = WorldConfig.from_dict( @@ -171,20 +188,179 @@ def _get_world_config_with_table_adjustment(self) -> WorldConfig: ).get_mesh_world() # Adjust mesh configuration if it exists - if world_cfg_mesh.mesh is not None: - if len(world_cfg_mesh.mesh) > 0: - world_cfg_mesh.mesh[0].name += "_mesh" - world_cfg_mesh.mesh[0].pose[2] = -10.5 # Move mesh below scene + if world_cfg_mesh.mesh and len(world_cfg_mesh.mesh) > 0: + mesh_obj = world_cfg_mesh.mesh[0] + if mesh_obj.name: + mesh_obj.name += "_mesh" + if mesh_obj.pose: + mesh_obj.pose[2] = -10.5 # Move mesh below scene # Combine cuboid and mesh worlds world_cfg = WorldConfig(cuboid=world_cfg_table.cuboid, mesh=world_cfg_mesh.mesh) return world_cfg + @staticmethod + def _create_temp_robot_yaml(base_yaml: str, urdf_path: str) -> str: + """Create a temporary robot configuration YAML with custom URDF path. + + Args: + base_yaml: Base robot configuration file name + urdf_path: Absolute path to the URDF file + + Returns: + Path to the temporary YAML file + + Raises: + FileNotFoundError: If the URDF file doesn't exist + """ + # Validate URDF path + if not os.path.isabs(urdf_path) or not os.path.isfile(urdf_path): + raise FileNotFoundError(f"URDF must be a local file: {urdf_path}") + + # Load base configuration + robot_cfg_path = get_robot_configs_path() + base_path = join_path(robot_cfg_path, base_yaml) + data = load_yaml(base_path) + print(f"urdf_path: {urdf_path}") + # Update URDF path + data["robot_cfg"]["kinematics"]["urdf_path"] = urdf_path + + # Write to temporary file + tmp_dir = tempfile.mkdtemp(prefix="curobo_robot_cfg_") + out_path = os.path.join(tmp_dir, base_yaml) + with open(out_path, "w") as f: + yaml.safe_dump(data, f, sort_keys=False) + + return out_path + + # ===================================================================================== + # FACTORY METHODS FOR ROBOT CONFIGURATIONS + # ===================================================================================== + """ + Creating Custom Robot Configurations + ===================================== + + To create a configuration for your own robot, follow these steps: + + 1. Create a Factory Method + --------------------------- + Define a classmethod that returns a configured instance: + + .. 
code-block:: python + + @classmethod + def my_robot_config(cls) -> "CuroboPlannerConfig": + # Option 1: Download from Nucleus (like Franka example) + urdf_path = f"{ISAACLAB_NUCLEUS_DIR}/path/to/my_robot.urdf" + local_urdf = retrieve_file_path(urdf_path, force_download=True) + + # Option 2: Use local file directly + # local_urdf = "/absolute/path/to/my_robot.urdf" + + # Create temporary YAML with custom URDF path + robot_cfg_file = cls._create_temp_robot_yaml("my_robot.yml", local_urdf) + + return cls( + # Required: Specify robot configuration file + robot_config_file=robot_cfg_file, # Use the generated YAML with custom URDF + robot_name="my_robot", + + # Gripper configuration (if robot has grippers) + gripper_joint_names=["gripper_left", "gripper_right"], + gripper_open_positions={"gripper_left": 0.05, "gripper_right": 0.05}, + gripper_closed_positions={"gripper_left": 0.01, "gripper_right": 0.01}, + + # Hand/finger links to disable during contact planning + hand_link_names=["finger_link_1", "finger_link_2", "palm_link"], + + # Optional: Custom collision spheres configuration + collision_spheres_file="spheres/my_robot_spheres.yml", # Path relative to curobo (can override with custom spheres file) + + # Grasp detection threshold + grasp_gripper_open_val=0.05, + + # Motion planning parameters (tune for your robot) + approach_distance=0.05, # Distance to approach before grasping + retreat_distance=0.05, # Distance to retreat after grasping + time_dilation_factor=0.5, # Speed factor (0.5 = half speed) + + # Visualization options + visualize_spheres=False, + visualize_plan=False, + debug_planner=False, + ) + + 2. Task-Specific Configurations + -------------------------------- + For task-specific variants, create methods that modify the base config: + + .. code-block:: python + + @classmethod + def my_robot_pick_place_config(cls) -> "CuroboPlannerConfig": + config = cls.my_robot_config() # Start from base config + + # Override for pick-and-place tasks + config.approach_distance = 0.08 + config.retreat_distance = 0.10 + config.enable_finetune_trajopt = True + config.collision_activation_distance = 0.02 + + # Custom world configuration if needed + config.get_world_config = lambda: config._get_world_config_with_table_adjustment() + + return config + + 3. Register in from_task_name() + -------------------------------- + Add your robot detection logic to the from_task_name method: + + .. code-block:: python + + @classmethod + def from_task_name(cls, task_name: str) -> "CuroboPlannerConfig": + task_lower = task_name.lower() + + # Add your robot detection + if "my-robot" in task_lower: + if "pick-place" in task_lower: + return cls.my_robot_pick_place_config() + else: + return cls.my_robot_config() + + # ... existing robot checks ... + + Important Notes + --------------- + - The _create_temp_robot_yaml() helper creates a temporary YAML with your custom URDF + - If using Nucleus assets, retrieve_file_path() downloads them to a local temp directory + - The base robot YAML (e.g., "my_robot.yml") should exist in cuRobo's robot configs + + Best Practices + -------------- + 1. Start with conservative parameters (slow speed, large distances) + 2. Test with visualization enabled (visualize_plan=True) for debugging + 3. Tune collision_activation_distance based on controller precision to follow collision-free motion + 4. Adjust sphere counts in extra_collision_spheres for attached objects + 5. 
Use debug_planner=True when developing new configurations + """ + @classmethod def franka_config(cls) -> "CuroboPlannerConfig": - """Create configuration for Franka Panda robot.""" + """Create configuration for Franka Panda robot. + + This method uses a custom URDF from Nucleus for the Franka robot. + + Returns: + CuroboPlannerConfig: Configuration for Franka robot + """ + urdf_path = f"{ISAACLAB_NUCLEUS_DIR}/Controllers/SkillGenAssets/FrankaPanda/franka_panda.urdf" + local_urdf = retrieve_file_path(urdf_path, force_download=True) + + robot_cfg_file = cls._create_temp_robot_yaml("franka.yml", local_urdf) + return cls( - robot_config_file="franka.yml", + robot_config_file=robot_cfg_file, robot_name="franka", gripper_joint_names=["panda_finger_joint1", "panda_finger_joint2"], gripper_open_positions={"panda_finger_joint1": 0.04, "panda_finger_joint2": 0.04}, @@ -196,7 +372,7 @@ def franka_config(cls) -> "CuroboPlannerConfig": retreat_distance=0.0, max_planning_attempts=1, time_dilation_factor=0.6, - enable_finetune_trajopt=False, + enable_finetune_trajopt=True, n_repeat=None, motion_step_size=None, visualize_spheres=False, @@ -211,53 +387,29 @@ def franka_stack_cube_bin_config(cls) -> "CuroboPlannerConfig": """Create configuration for Franka stacking cube in a bin.""" config = cls.franka_config() config.gripper_closed_positions = {"panda_finger_joint1": 0.024, "panda_finger_joint2": 0.024} - config.grasp_gripper_open_val = 0.04 config.approach_distance = 0.05 config.retreat_distance = 0.07 config.surface_sphere_radius = 0.01 - config.debug_planner = True + config.debug_planner = False config.collision_activation_distance = 0.02 - config.visualize_plan = True + config.visualize_plan = False config.enable_finetune_trajopt = True config.motion_noise_scale = 0.02 config.get_world_config = lambda: config._get_world_config_with_table_adjustment() return config - @classmethod - def franka_stack_square_nut_config(cls) -> "CuroboPlannerConfig": - """Create configuration for Franka stacking a square nut.""" - config = cls.franka_config() - config.gripper_closed_positions = {"panda_finger_joint1": 0.021, "panda_finger_joint2": 0.021} - config.grasp_gripper_open_val = 0.04 - config.approach_distance = 0.11 - config.retreat_distance = 0.11 - config.extra_collision_spheres = {"attached_object": 200} - config.surface_sphere_radius = 0.005 - config.n_repeat = None - config.motion_step_size = None - config.visualize_spheres = False - config.visualize_plan = True - config.debug_planner = True - config.motion_noise_scale = 0.0 - config.time_dilation_factor = 0.4 - config.get_world_config = lambda: config._get_world_config_with_table_adjustment() - return config - @classmethod def franka_stack_cube_config(cls) -> "CuroboPlannerConfig": """Create configuration for Franka stacking a normal cube.""" config = cls.franka_config() - config.n_repeat = None - config.motion_step_size = None config.visualize_spheres = False - config.visualize_plan = False - config.debug_planner = True - config.motion_noise_scale = 0.0 - config.motion_step_size = None - config.n_repeat = None + config.visualize_plan = True + config.debug_planner = False + config.motion_noise_scale = 0.02 config.collision_activation_distance = 0.01 config.approach_distance = 0.05 config.retreat_distance = 0.05 + config.surface_sphere_radius = 0.01 config.get_world_config = lambda: config._get_world_config_with_table_adjustment() return config @@ -275,8 +427,6 @@ def from_task_name(cls, task_name: str) -> "CuroboPlannerConfig": if "stack-cube-bin" in 
task_lower: return cls.franka_stack_cube_bin_config() - elif "stack-square-nut" in task_lower: - return cls.franka_stack_square_nut_config() elif "stack-cube" in task_lower: return cls.franka_stack_cube_config() else: diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py index b231e621434..ecfed2bc101 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py @@ -370,7 +370,6 @@ def visualize_plan( robot_spheres: list[Any] | None = None, attached_spheres: list[Any] | None = None, ee_positions: np.ndarray | None = None, - finger_positions: np.ndarray | None = None, frame_duration: float = 0.1, world_scene: Optional["trimesh.Scene"] = None, ) -> None: @@ -382,7 +381,6 @@ def visualize_plan( robot_spheres: Optional list of robot collision spheres attached_spheres: Optional list of attached object spheres ee_positions: Optional end-effector positions - finger_positions: Optional finger positions frame_duration: Duration between frames in seconds world_scene: Optional world scene to visualize """ @@ -412,7 +410,7 @@ def visualize_plan( self._visualize_target_pose(target_pose) # Visualize trajectory (end-effector positions if provided) - self._visualize_trajectory(plan, frame_duration, ee_positions, finger_positions) + self._visualize_trajectory(plan, frame_duration, ee_positions) # Visualize spheres if provided if robot_spheres: @@ -428,10 +426,8 @@ def _clear_visualization(self) -> None: # Clear dynamic trajectory, target, and finger logs to avoid artifacts between visualizations dynamic_paths = [ "trajectory", - "finger_trajectory", - "trajectory", # keyframe children cleared recursively - "finger", # keyframe children cleared recursively "target", + "anim", ] for path in dynamic_paths: @@ -489,7 +485,6 @@ def _visualize_trajectory( plan: JointState, frame_duration: float, ee_positions: np.ndarray | None = None, - finger_positions: np.ndarray | None = None, ) -> None: """Visualize the robot trajectory. @@ -497,7 +492,6 @@ def _visualize_trajectory( plan: Joint state trajectory frame_duration: Duration between frames in seconds ee_positions: Optional end-effector positions - finger_positions: Optional finger positions """ if ee_positions is None: raw = plan.position.detach().cpu().numpy() if torch.is_tensor(plan.position) else np.array(plan.position) @@ -519,6 +513,7 @@ def _visualize_trajectory( colors=[[0, 100, 255]], # Blue radii=[0.005], ), + static=True, ) # Log keyframes @@ -530,31 +525,9 @@ def _visualize_trajectory( colors=[[0, 100, 255]], # Blue radii=[0.01], ), + static=True, ) - # Log finger path if provided - if finger_positions is not None: - if len(finger_positions) == 0: - return - try: - rr.log( - "world/finger_trajectory", - rr.LineStrips3D( - [finger_positions], - colors=[[0, 0, 255]], # Blue - radii=[0.005], - ), - ) - for i, pos in enumerate(finger_positions): - rr.log( - f"world/finger/keyframe_{i}", - rr.Points3D( - positions=np.array([pos + self._base_translation]), colors=[[0, 0, 255]], radii=[0.01] - ), - ) - except Exception as e: - print(f"Error visualizing finger trajectory: {e}") - def _visualize_robot_spheres(self, spheres: list[Any]) -> None: """Visualize robot collision spheres. 
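The static=True flag added to the trajectory logging above pins those entities outside Rerun's timelines, so the planned end-effector path stays on screen while the sphere animation advances its own "sphere_animation" timeline. A minimal sketch of the logging pattern, assuming the rerun SDK is installed (the app id and the synthetic path below are illustrative stand-ins for real planner output):

import numpy as np
import rerun as rr

rr.init("plan_preview", spawn=True)
ee_positions = np.cumsum(np.random.randn(20, 3) * 0.01, axis=0)  # stand-in EE path

# Static entities render on every frame, independent of the selected timeline step.
rr.log(
    "world/trajectory",
    rr.LineStrips3D([ee_positions], colors=[[0, 100, 255]], radii=[0.005]),
    static=True,
)
for i, pos in enumerate(ee_positions):
    rr.log(f"world/trajectory/keyframe_{i}", rr.Points3D(positions=[pos], radii=[0.01]), static=True)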
@@ -948,23 +921,16 @@ def set_motion_generator_reference(self, motion_gen: Any) -> None: """ self._motion_gen_ref = motion_gen - def restore_static_spheres(self) -> None: - """Restore static sphere visualization after animation.""" - # Note: This would require re-calling visualize_plan to recreate the static spheres - if self.debug: - print("To restore static spheres, call visualize_plan() again") - - def clear_animation_spheres(self) -> None: - """Clear animated sphere visualization.""" - rr.log("world/robot_animation", rr.Clear(recursive=True)) - rr.log("world/attached_animation", rr.Clear(recursive=True)) - - if self.debug: - print("Cleared animation spheres") - - def clear_attached_animation_spheres(self) -> None: - """Clear only attached object animation spheres.""" - rr.log("world/attached_animation", rr.Clear(recursive=True)) - - if self.debug: - print("Cleared attached object animation spheres") + def mark_idle(self) -> None: + # Advance plan timeline and emit empty anim so latest frame is blank + rr.set_time("plan", sequence=self._current_frame) + self._current_frame += 1 + empty = np.empty((0, 3), dtype=float) + rr.log("world/anim/ee", rr.Points3D(positions=empty)) + rr.log("world/robot_animation", rr.Points3D(positions=empty)) + rr.log("world/attached_animation", rr.Points3D(positions=empty)) + + # Also advance sphere animation timeline + rr.set_time("sphere_animation", sequence=self._current_frame) + rr.log("world/robot_animation", rr.Points3D(positions=empty)) + rr.log("world/attached_animation", rr.Points3D(positions=empty)) From 531582d93260192fe70ab9a8a339d044be8e0894 Mon Sep 17 00:00:00 2001 From: Neel Jawale Date: Wed, 27 Aug 2025 16:31:51 -0700 Subject: [PATCH 4/6] Added data gen test for skillgen, and changes to planner and visualizer --- docs/licenses/dependencies/cuRobo-license.txt | 2 +- .../motion_planners/curobo/curobo_planner.py | 4 +- .../curobo/curobo_planner_config.py | 5 +- .../motion_planners/curobo/plan_visualizer.py | 23 ++--- .../test/test_generate_dataset_skillgen.py | 91 +++++++++++++++++++ 5 files changed, 107 insertions(+), 18 deletions(-) create mode 100644 source/isaaclab_mimic/test/test_generate_dataset_skillgen.py diff --git a/docs/licenses/dependencies/cuRobo-license.txt b/docs/licenses/dependencies/cuRobo-license.txt index 70b7c6fb67b..2b76a56cbf8 100644 --- a/docs/licenses/dependencies/cuRobo-license.txt +++ b/docs/licenses/dependencies/cuRobo-license.txt @@ -90,4 +90,4 @@ If you don't accept all the terms and conditions below, do not use the Software. 14.11 Entire Agreement. Regarding the subject matter of this Agreement, the parties agree that (a) this Agreement constitutes the entire and exclusive agreement between the parties and supersedes all prior and contemporaneous communications and (b) any additional or different terms or conditions, whether contained in purchase orders, order acknowledgments, invoices or otherwise, will not be binding and are null and void. -(v. August 15, 2025) \ No newline at end of file +(v. 
August 15, 2025) diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py index 61d4af9c3bb..5b5eb961c54 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py @@ -1073,7 +1073,6 @@ def plan_motion( robot_spheres=robot_spheres, attached_spheres=attached_spheres, ee_positions=np.array(ee_positions_list) if ee_positions_list else None, - frame_duration=0.1, world_scene=world_scene, ) @@ -1087,7 +1086,6 @@ def plan_motion( robot_spheres_at_start=robot_spheres, attached_spheres_at_start=attached_spheres, timeline="sphere_animation", - frame_duration=0.1, interpolation_steps=15, # More steps for smoother animation ) @@ -1445,7 +1443,7 @@ def reset_plan(self) -> None: self._plan_index = 0 self._current_plan = None if self.visualize_plan and hasattr(self, "plan_visualizer"): - self.plan_visualizer._clear_visualization() + self.plan_visualizer.clear_visualization() self.plan_visualizer.mark_idle() def get_planned_poses(self) -> list[torch.Tensor]: diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py index a3a0ee9061f..a3c5d26edf9 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py @@ -392,7 +392,7 @@ def franka_stack_cube_bin_config(cls) -> "CuroboPlannerConfig": config.surface_sphere_radius = 0.01 config.debug_planner = False config.collision_activation_distance = 0.02 - config.visualize_plan = False + config.visualize_plan = True config.enable_finetune_trajopt = True config.motion_noise_scale = 0.02 config.get_world_config = lambda: config._get_world_config_with_table_adjustment() @@ -402,8 +402,7 @@ def franka_stack_cube_bin_config(cls) -> "CuroboPlannerConfig": def franka_stack_cube_config(cls) -> "CuroboPlannerConfig": """Create configuration for Franka stacking a normal cube.""" config = cls.franka_config() - config.visualize_spheres = False - config.visualize_plan = True + config.visualize_plan = False config.debug_planner = False config.motion_noise_scale = 0.02 config.collision_activation_distance = 0.01 diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py index ecfed2bc101..d9ebbaeb157 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py @@ -370,7 +370,6 @@ def visualize_plan( robot_spheres: list[Any] | None = None, attached_spheres: list[Any] | None = None, ee_positions: np.ndarray | None = None, - frame_duration: float = 0.1, world_scene: Optional["trimesh.Scene"] = None, ) -> None: """Visualize a complete motion plan with all components. 
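One of the visualizer hunks below also corrects the target-pose orientation: the old code rebuilt the rotation as an axis-angle about +Z with an angle derived from a single matrix element (np.arccos(rot_np[0, 0]) * 2), which only handles pure yaw and, by treating a matrix element as if it were a quaternion scalar, generally mis-derives the angle. Passing the full matrix through Rerun's mat3x3 argument preserves the orientation exactly. A minimal sketch of the corrected pattern, with illustrative position and rotation values:

import numpy as np
import rerun as rr

rr.init("target_pose_demo", spawn=True)
pos_np = np.array([0.4, 0.0, 0.3])  # illustrative target position
theta = np.pi / 4                   # illustrative yaw; any 3x3 rotation matrix works
rot_np = np.array([
    [np.cos(theta), -np.sin(theta), 0.0],
    [np.sin(theta), np.cos(theta), 0.0],
    [0.0, 0.0, 1.0],
])

# mat3x3 carries the complete orientation; a fixed-axis angle reconstruction cannot.
rr.log("world/target/frame", rr.Transform3D(translation=pos_np, mat3x3=rot_np))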
@@ -381,7 +380,6 @@ def visualize_plan( robot_spheres: Optional list of robot collision spheres attached_spheres: Optional list of attached object spheres ee_positions: Optional end-effector positions - frame_duration: Duration between frames in seconds world_scene: Optional world scene to visualize """ if self.debug: @@ -410,7 +408,7 @@ def visualize_plan( self._visualize_target_pose(target_pose) # Visualize trajectory (end-effector positions if provided) - self._visualize_trajectory(plan, frame_duration, ee_positions) + self._visualize_trajectory(plan, ee_positions) # Visualize spheres if provided if robot_spheres: @@ -439,6 +437,10 @@ def _clear_visualization(self) -> None: self._sphere_entities[entity_type] = [] self._current_frame = 0 + def clear_visualization(self) -> None: + """Public method to clear the visualization.""" + self._clear_visualization() + def _visualize_target_pose(self, target_pose: torch.Tensor) -> None: """Visualize the target end-effector pose. @@ -473,24 +475,19 @@ def _visualize_target_pose(self, target_pose: torch.Tensor) -> None: "world/target/frame", rr.Transform3D( translation=pos_np, - rotation=rr.RotationAxisAngle( - axis=[0, 0, 1], - angle=np.arccos(rot_np[0, 0]) * 2, - ), + mat3x3=rot_np, ), ) def _visualize_trajectory( self, plan: JointState, - frame_duration: float, ee_positions: np.ndarray | None = None, ) -> None: """Visualize the robot trajectory. Args: plan: Joint state trajectory - frame_duration: Duration between frames in seconds ee_positions: Optional end-effector positions """ if ee_positions is None: @@ -715,7 +712,6 @@ def animate_spheres_along_path( robot_spheres_at_start: list[Any] | None = None, attached_spheres_at_start: list[Any] | None = None, timeline: str = "sphere_animation", - frame_duration: float = 0.1, interpolation_steps: int = 10, ) -> None: """Animate robot and attached object spheres along the planned trajectory with smooth interpolation. @@ -728,7 +724,6 @@ def animate_spheres_along_path( robot_spheres_at_start: Initial robot collision spheres (for reference) attached_spheres_at_start: Initial attached object spheres (for reference) timeline: Name of the Rerun timeline for the animation - frame_duration: Duration between frames in seconds interpolation_steps: Number of interpolated steps between each waypoint pair """ if plan is None or len(plan.position) == 0: @@ -922,6 +917,12 @@ def set_motion_generator_reference(self, motion_gen: Any) -> None: self._motion_gen_ref = motion_gen def mark_idle(self) -> None: + """Signal that the planner is idle, clearing animations. + + This method advances the animation timelines and logs empty data to ensure that + no leftover visualizations from the previous plan are shown. It's useful for + creating a clean state between planning episodes. + """ # Advance plan timeline and emit empty anim so latest frame is blank rr.set_time("plan", sequence=self._current_frame) self._current_frame += 1 diff --git a/source/isaaclab_mimic/test/test_generate_dataset_skillgen.py b/source/isaaclab_mimic/test/test_generate_dataset_skillgen.py new file mode 100644 index 00000000000..846604a1c0c --- /dev/null +++ b/source/isaaclab_mimic/test/test_generate_dataset_skillgen.py @@ -0,0 +1,91 @@ +# Copyright (c) 2024-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""Test dataset generation with SkillGen for Isaac Lab Mimic workflow."""
+
+from isaaclab.app import AppLauncher
+
+# Launch omniverse app
+simulation_app = AppLauncher(headless=True).app
+
+import os
+import subprocess
+import tempfile
+
+import pytest
+
+from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path
+
+DATASETS_DOWNLOAD_DIR = tempfile.mkdtemp(suffix="_Isaac-Stack-Cube-Franka-IK-Rel-Skillgen-v0")
+NUCLEUS_SKILLGEN_ANNOTATED_DATASET_PATH = os.path.join(
+    ISAACLAB_NUCLEUS_DIR, "Mimic", "franka_stack_datasets", "annotated_dataset_skillgen.hdf5"
+)
+
+
+@pytest.fixture
+def setup_skillgen_test_environment():
+    """Prepare the environment for the SkillGen dataset generation test."""
+    # Create the datasets directory if it does not exist
+    if not os.path.exists(DATASETS_DOWNLOAD_DIR):
+        print("Creating directory:", DATASETS_DOWNLOAD_DIR)
+        os.makedirs(DATASETS_DOWNLOAD_DIR)
+
+    # Download the SkillGen annotated dataset from Nucleus into DATASETS_DOWNLOAD_DIR
+    retrieve_file_path(NUCLEUS_SKILLGEN_ANNOTATED_DATASET_PATH, DATASETS_DOWNLOAD_DIR)
+
+    # Set the environment variable PYTHONUNBUFFERED to 1 to get all text outputs in result.stdout
+    pythonunbuffered_env_var_ = os.environ.get("PYTHONUNBUFFERED")
+    os.environ["PYTHONUNBUFFERED"] = "1"
+
+    # Automatically detect the workflow root (backtrack from current file location)
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+    workflow_root = os.path.abspath(os.path.join(current_dir, "../../.."))
+
+    # Yield the workflow root for use in tests
+    yield workflow_root
+
+    # Cleanup: restore the original environment variable
+    if pythonunbuffered_env_var_:
+        os.environ["PYTHONUNBUFFERED"] = pythonunbuffered_env_var_
+    else:
+        del os.environ["PYTHONUNBUFFERED"]
+
+
+def test_generate_dataset_skillgen(setup_skillgen_test_environment):
+    """Test dataset generation with SkillGen enabled."""
+    workflow_root = setup_skillgen_test_environment
+
+    input_file = os.path.join(DATASETS_DOWNLOAD_DIR, "annotated_dataset_skillgen.hdf5")
+    output_file = os.path.join(DATASETS_DOWNLOAD_DIR, "generated_dataset_skillgen.hdf5")
+
+    command = [
+        workflow_root + "/isaaclab.sh",
+        "-p",
+        os.path.join(workflow_root, "scripts/imitation_learning/isaaclab_mimic/generate_dataset.py"),
+        "--device",
+        "cpu",
+        "--input_file",
+        input_file,
+        "--output_file",
+        output_file,
+        "--num_envs",
+        "1",
+        "--generation_num_trials",
+        "1",
+        "--use_skillgen",
+        "--headless",
+        "--task",
+        "Isaac-Stack-Cube-Franka-IK-Rel-Skillgen-v0",
+    ]
+
+    result = subprocess.run(command, capture_output=True, text=True)
+
+    print("SkillGen dataset generation result:")
+    print(result.stdout)
+    print(result.stderr)
+
+    assert result.returncode == 0, result.stderr
+    expected_output = "successes/attempts. Exiting"
+    assert expected_output in result.stdout

From ebca0ae9318f5c08df3af5a483648b36825b67ec Mon Sep 17 00:00:00 2001
From: Neel Jawale
Date: Thu, 28 Aug 2025 14:39:56 -0700
Subject: [PATCH 5/6] Minor change in README regarding isaacsim license file

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 19b157fa82d..e13008da7c9 100644
--- a/README.md
+++ b/README.md
@@ -188,7 +188,7 @@ where creativity and technology intersect. Your contributions can make a meaning
 The Isaac Lab framework is released under [BSD-3 License](LICENSE). The `isaaclab_mimic` extension and its corresponding standalone scripts are released under [Apache 2.0](LICENSE-mimic).
The license files of its dependencies and assets are present in the [`docs/licenses`](docs/licenses) directory. -Note that Isaac Lab requires Isaac Sim, which includes components under proprietary licensing terms. Please see the [Isaac Sim license](https://github.com/isaac-sim/IsaacSim/blob/main/LICENSE) for information on Isaac Sim licensing. +Note that Isaac Lab requires Isaac Sim, which includes components under proprietary licensing terms. Please see the [Isaac Sim license](docs/licenses/dependencies/isaacsim-license.txt) for information on Isaac Sim licensing. Note that the `isaaclab_mimic` extension requires cuRobo, which has proprietary licensing terms that can be found in [`docs/licenses/dependencies/cuRobo-license.txt`](docs/licenses/dependencies/cuRobo-license.txt). From ada1a3a6c6a4bda42ac9748e98c3ec66a41dc8f6 Mon Sep 17 00:00:00 2001 From: Neel Jawale Date: Fri, 29 Aug 2025 15:56:44 -0700 Subject: [PATCH 6/6] Addressing review comments: type hints, conditional simplification, removing redundancies --- .../isaaclab_mimic/generate_dataset.py | 2 +- .../datagen/datagen_info_pool.py | 15 +- .../motion_planners/base_motion_planner.py | 2 +- .../motion_planners/curobo/curobo_planner.py | 175 ++++++++---------- .../curobo/curobo_planner_config.py | 8 +- .../motion_planners/curobo/plan_visualizer.py | 6 +- .../test/test_curobo_planner_cube_stack.py | 5 +- .../curobo/test/test_curobo_planner_franka.py | 13 +- 8 files changed, 105 insertions(+), 121 deletions(-) diff --git a/scripts/imitation_learning/isaaclab_mimic/generate_dataset.py b/scripts/imitation_learning/isaaclab_mimic/generate_dataset.py index d1f4d065e35..97088c5e73a 100644 --- a/scripts/imitation_learning/isaaclab_mimic/generate_dataset.py +++ b/scripts/imitation_learning/isaaclab_mimic/generate_dataset.py @@ -192,7 +192,7 @@ def main(): # Cleanup of motion planners and their visualizers if motion_planners is not None: for env_id, planner in motion_planners.items(): - if hasattr(planner, "plan_visualizer") and planner.plan_visualizer is not None: + if getattr(planner, "plan_visualizer", None) is not None: print(f"Closing plan visualizer for environment {env_id}") planner.plan_visualizer.close() planner.plan_visualizer = None diff --git a/source/isaaclab_mimic/isaaclab_mimic/datagen/datagen_info_pool.py b/source/isaaclab_mimic/isaaclab_mimic/datagen/datagen_info_pool.py index ca95299d8ea..510c23786b4 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/datagen/datagen_info_pool.py +++ b/source/isaaclab_mimic/isaaclab_mimic/datagen/datagen_info_pool.py @@ -191,17 +191,10 @@ def _add_episode(self, episode: EpisodeData): eef_subtask_boundaries[i - 1][1] + prev_max_offset_range < eef_subtask_boundaries[i][1] + self.subtask_term_offset_ranges[eef_name][i][0] ), ( - "subtask sanity check violation in demo with subtask {} end ind {}, subtask {} max offset {}," - " subtask {} end ind {}, and subtask {} min offset {}".format( - i - 1, - eef_subtask_boundaries[i - 1][1], - i - 1, - prev_max_offset_range, - i, - eef_subtask_boundaries[i][1], - i, - self.subtask_term_offset_ranges[eef_name][i][0], - ) + f"subtask sanity check violation in demo with subtask {i - 1} end ind" + f" {eef_subtask_boundaries[i - 1][1]}, subtask {i - 1} max offset {prev_max_offset_range}," + f" subtask {i} end ind {eef_subtask_boundaries[i][1]}, and subtask {i} min offset" + f" {self.subtask_term_offset_ranges[eef_name][i][0]}" ) self._subtask_boundaries[eef_name].append(eef_subtask_boundaries) diff --git 
a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/base_motion_planner.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/base_motion_planner.py index 1caa7bc8b08..e82df7c33f0 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/base_motion_planner.py +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/base_motion_planner.py @@ -50,7 +50,7 @@ def __init__( self.debug = debug @abstractmethod - def update_world_and_plan_motion(self, target_pose: torch.Tensor, **kwargs) -> bool: + def update_world_and_plan_motion(self, target_pose: torch.Tensor, **kwargs: Any) -> bool: """Update collision world and plan motion to target pose. This is the main entry point for motion planning. It should: diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py index 5b5eb961c54..6002d13ad16 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner.py @@ -8,6 +8,7 @@ from dataclasses import dataclass from typing import Any +from curobo.cuda_robot_model.cuda_robot_model import CudaRobotModelState from curobo.geom.sdf.world import CollisionCheckerType from curobo.geom.sphere_fit import SphereFitType from curobo.geom.types import WorldConfig @@ -245,7 +246,7 @@ def _to_env_device(self, tensor: torch.Tensor) -> torch.Tensor: # INITIALIZATION AND CONFIGURATION # ===================================================================================== - def _initialize_static_world(self): + def _initialize_static_world(self) -> None: """Initialize static world geometry from USD stage. Reads static environment geometry once during planner initialization to establish @@ -318,7 +319,7 @@ def get_object_pose(self, object_name: str) -> Pose | None: return None # Search for object in world model - for obj_list, obj_type in [ + for obj_list, _ in [ (world_model.mesh, "mesh"), (world_model.cuboid, "cuboid"), ]: @@ -374,18 +375,11 @@ def get_attached_pose(self, link_name: str, joint_state: JointState | None = Non if self.debug: print(f"DEBUG GET_ATTACHED_POSE: Using {ee_link} for {link_name}") return link_poses[ee_link] - else: - if self.debug: - print(f"WARNING: {ee_link} not found, using EE pose") - return self.get_ee_pose(joint_state) # Return directly for other links if link_name in link_poses: return link_poses[link_name] - else: - if self.debug: - print(f"WARNING: Link {link_name} not found, using EE pose") - return self.get_ee_pose(joint_state) + raise KeyError(f"Link {link_name} not found in computed link poses") def create_attachment( self, object_name: str, link_name: str | None = None, joint_state: JointState | None = None @@ -485,10 +479,9 @@ def _get_world_object_names(self) -> list[str]: # Handle case where world_model might be a list if isinstance(world_model, list): - if len(world_model) > self.env_id: - world_model = world_model[self.env_id] - else: + if len(world_model) <= self.env_id: return [] + world_model = world_model[self.env_id] object_names = [] @@ -518,90 +511,82 @@ def _sync_object_poses_with_isaaclab(self) -> None: The method updates both the world model and the collision checker to ensure consistency across all cuRobo components. 
""" - try: - # Get cached object mappings and world model - object_mappings = self._get_object_mappings() - world_model = self.motion_gen.world_coll_checker.world_model - rigid_objects = self.env.scene.rigid_objects + # Get cached object mappings and world model + object_mappings = self._get_object_mappings() + world_model = self.motion_gen.world_coll_checker.world_model + rigid_objects = self.env.scene.rigid_objects + + updated_count = 0 + + for object_name, object_path in object_mappings.items(): + if object_name not in rigid_objects: + continue + + # Skip static mesh objects - they should not be dynamically updated + static_objects = getattr(self.config, "static_objects", []) + if any(static_name in object_name.lower() for static_name in static_objects): + if self.debug: + print(f"SYNC: Skipping static object {object_name}") + continue - updated_count = 0 + # Get current pose from Lab (may be on CPU or CUDA depending on --device flag) + obj = rigid_objects[object_name] + env_origin = self.env.scene.env_origins[self.env_id] + current_pos_raw = obj.data.root_pos_w[self.env_id] - env_origin + current_quat_raw = obj.data.root_quat_w[self.env_id] # (w, x, y, z) + + # Convert to cuRobo device and extract float values for pose list + current_pos = self._to_curobo_device(current_pos_raw) + current_quat = self._to_curobo_device(current_quat_raw) + + # Convert to cuRobo pose format [x, y, z, w, x, y, z] + pose_list = [ + float(current_pos[0].item()), + float(current_pos[1].item()), + float(current_pos[2].item()), + float(current_quat[0].item()), + float(current_quat[1].item()), + float(current_quat[2].item()), + float(current_quat[3].item()), + ] + # Update object pose in cuRobo's world model + if self._update_object_in_world_model(world_model, object_name, object_path, pose_list): + updated_count += 1 + + if self.debug: + print(f"SYNC: Updated {updated_count} object poses in cuRobo world model") + + # Sync object poses with collision checker + if updated_count > 0: + # Update individual obstacle poses in collision checker + # This preserves static mesh objects unlike load_collision_model which rebuilds everything for object_name, object_path in object_mappings.items(): if object_name not in rigid_objects: continue # Skip static mesh objects - they should not be dynamically updated - if any(static_name in object_name.lower() for static_name in ["bin", "table", "wall", "floor"]): - if self.debug: - print(f"SYNC: Skipping static object {object_name}") + static_objects = getattr(self.config, "static_objects", []) + if any(static_name in object_name.lower() for static_name in static_objects): continue - # Get current pose from Lab (may be on CPU or CUDA depending on --device flag) + # Get current pose and update in collision checker obj = rigid_objects[object_name] env_origin = self.env.scene.env_origins[self.env_id] current_pos_raw = obj.data.root_pos_w[self.env_id] - env_origin - current_quat_raw = obj.data.root_quat_w[self.env_id] # (w, x, y, z) + current_quat_raw = obj.data.root_quat_w[self.env_id] - # Convert to cuRobo device and extract float values for pose list current_pos = self._to_curobo_device(current_pos_raw) current_quat = self._to_curobo_device(current_quat_raw) - # Convert to cuRobo pose format [x, y, z, w, x, y, z] - pose_list = [ - float(current_pos[0].item()), - float(current_pos[1].item()), - float(current_pos[2].item()), - float(current_quat[0].item()), - float(current_quat[1].item()), - float(current_quat[2].item()), - float(current_quat[3].item()), - ] - - # Update object pose 
in cuRobo's world model - if self._update_object_in_world_model(world_model, object_name, object_path, pose_list): - updated_count += 1 - - if self.debug: - print(f"SYNC: Updated {updated_count} object poses in cuRobo world model") - - # Sync object poses with collision checker - if updated_count > 0: - try: - # Update individual obstacle poses in collision checker - # This preserves static mesh objects unlike load_collision_model which rebuilds everything - for object_name, object_path in object_mappings.items(): - if object_name not in rigid_objects: - continue - - # Skip static mesh objects - they should not be dynamically updated - if any(static_name in object_name.lower() for static_name in ["bin", "table", "wall", "floor"]): - continue - - # Get current pose and update in collision checker - obj = rigid_objects[object_name] - env_origin = self.env.scene.env_origins[self.env_id] - current_pos_raw = obj.data.root_pos_w[self.env_id] - env_origin - current_quat_raw = obj.data.root_quat_w[self.env_id] - - current_pos = self._to_curobo_device(current_pos_raw) - current_quat = self._to_curobo_device(current_quat_raw) - - # Create cuRobo pose and update collision checker directly - curobo_pose = Pose(position=current_pos, quaternion=current_quat) - self.motion_gen.world_coll_checker.update_obstacle_pose( # type: ignore - object_path, curobo_pose, env_idx=self.env_id, update_cpu_reference=True - ) - - if self.debug: - print(f"Updated {updated_count} object poses in collision checker") - - except Exception as e: - if self.debug: - print(f"ERROR updating collision checker poses: {e}") + # Create cuRobo pose and update collision checker directly + curobo_pose = Pose(position=current_pos, quaternion=current_quat) + self.motion_gen.world_coll_checker.update_obstacle_pose( # type: ignore + object_path, curobo_pose, env_idx=self.env_id, update_cpu_reference=True + ) - except Exception as e: if self.debug: - print(f"ERROR in pose synchronization: {e}") + print(f"Updated {updated_count} object poses in collision checker") def _get_object_mappings(self) -> dict[str, str]: """Get object mappings with caching for performance optimization. 
@@ -863,7 +848,7 @@ def has_attached_objects(self) -> bool:
             True if one or more objects are attached, False if no attachments exist
         """
         # With cumotion.py pattern, check if attached_objects dict is non-empty
-        return len(self.attached_objects) > 0
+        return len(self.attached_objects) != 0
 
     # =====================================================================================
     # JOINT STATE AND KINEMATICS
@@ -1032,10 +1017,9 @@ def plan_motion(
                     if link != self.config.attached_object_link_name
                 ]
                 for link_name in robot_links:
-                    if hasattr(self.motion_gen.kinematics.kinematics_config, "get_link_spheres"):
-                        link_spheres = self.motion_gen.kinematics.kinematics_config.get_link_spheres(link_name)
-                        if link_spheres is not None:
-                            robot_link_count += int(torch.sum(link_spheres[:, 3] > 0).item())
+                    link_spheres = self.motion_gen.kinematics.kinematics_config.get_link_spheres(link_name)
+                    if link_spheres is not None:
+                        robot_link_count += int(torch.sum(link_spheres[:, 3] > 0).item())
 
                 # Split spheres
                 for i, sphere in enumerate(sphere_list):
@@ -1157,7 +1141,7 @@ def _plan_to_contact_pose(
             result: Any = self.motion_gen.plan_single(start_state, goal_pose, self.plan_config)
 
             if result.success.item():
-                if result.optimized_plan is not None and len(result.optimized_plan.position) > 0:
+                if result.optimized_plan is not None and len(result.optimized_plan.position) != 0:
                     self._current_plan = result.optimized_plan
                     if self.debug:
                         print(f"Using optimized plan with {len(self._current_plan.position)} waypoints")
@@ -1184,7 +1168,6 @@
         except Exception as e:
             if self.debug:
                 print(f"Error during planning: {e}")
-            planning_success = False
 
         # Always restore sphere state after planning, regardless of success
         if contact:
@@ -1346,7 +1329,7 @@ def _linearly_retime_plan(
         distances = torch.norm(deltas, dim=-1)
 
         waypoints = [path[0]]
-        for i, (distance, waypoint) in enumerate(zip(distances, path[1:])):
+        for distance, waypoint in zip(distances, path[1:]):
             if distance > 1e-6:
                 waypoints.append(waypoint)
@@ -1359,8 +1342,6 @@
             deltas = waypoints[1:] - waypoints[:-1]
             distances = torch.norm(deltas, dim=-1)
             cum_distances = torch.cat([torch.zeros(1, device=distances.device), torch.cumsum(distances, dim=0)])
-        else:
-            cum_distances = torch.zeros(1, device=path.device)
 
         if len(waypoints) < 2 or cum_distances[-1] < 1e-6:
             return plan
@@ -1431,7 +1412,7 @@ def get_next_waypoint_ee_pose(self) -> Pose:
             raise IndexError("No more waypoints in the plan.")
         next_joint_state: JointState = self._current_plan[self._plan_index]
         self._plan_index += 1
-        eef_state: Any = self.motion_gen.compute_kinematics(next_joint_state)
+        eef_state: CudaRobotModelState = self.motion_gen.compute_kinematics(next_joint_state)
         return eef_state.ee_pose
 
     def reset_plan(self) -> None:
@@ -1829,13 +1810,11 @@ def _check_object_grasped(self, gripper_pos: torch.Tensor, object_name: str) ->
             True if object is detected as grasped
         """
         gripper_open_val = self.config.grasp_gripper_open_val
+        object_grasped = gripper_pos[0].item() < gripper_open_val
 
-        if gripper_pos[0].item() < gripper_open_val:
-            print(f"Object {object_name} is grasped")
-            return True
-        else:
-            print(f"Object {object_name} is not grasped")
-            return False
+        print(f"Object {object_name} is {'grasped' if object_grasped else 'not grasped'}")
+
+        return object_grasped
 
     def _set_gripper_state(self, has_attached_objects: bool) -> None:
         """Configure gripper joint positions based on object attachment status.
diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py index a3c5d26edf9..7a1ca7eda13 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/curobo_planner_config.py @@ -68,6 +68,10 @@ class CuroboPlannerConfig: world_config_file: str = "collision_table.yml" """CuRobo world configuration file (without path).""" + # Static objects to not update in the world model + static_objects: list[str] = field(default_factory=list) + """Names of static objects to not update in the world model.""" + # Motion planning parameters collision_checker_type: CollisionCheckerType = CollisionCheckerType.MESH """Type of collision checker to use.""" @@ -386,13 +390,14 @@ def franka_config(cls) -> "CuroboPlannerConfig": def franka_stack_cube_bin_config(cls) -> "CuroboPlannerConfig": """Create configuration for Franka stacking cube in a bin.""" config = cls.franka_config() + config.static_objects = ["bin", "table"] config.gripper_closed_positions = {"panda_finger_joint1": 0.024, "panda_finger_joint2": 0.024} config.approach_distance = 0.05 config.retreat_distance = 0.07 config.surface_sphere_radius = 0.01 config.debug_planner = False config.collision_activation_distance = 0.02 - config.visualize_plan = True + config.visualize_plan = False config.enable_finetune_trajopt = True config.motion_noise_scale = 0.02 config.get_world_config = lambda: config._get_world_config_with_table_adjustment() @@ -402,6 +407,7 @@ def franka_stack_cube_bin_config(cls) -> "CuroboPlannerConfig": def franka_stack_cube_config(cls) -> "CuroboPlannerConfig": """Create configuration for Franka stacking a normal cube.""" config = cls.franka_config() + config.static_objects = ["table"] config.visualize_plan = False config.debug_planner = False config.motion_noise_scale = 0.02 diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py index d9ebbaeb157..c92f092c65e 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/plan_visualizer.py @@ -186,7 +186,7 @@ def signal_handler(signum, frame): if PSUTIL_AVAILABLE: print("Enhanced process monitoring enabled") - def _start_parent_process_monitoring(self): + def _start_parent_process_monitoring(self) -> None: """Start monitoring the parent process and cleanup when it dies.""" if not PSUTIL_AVAILABLE: if self.debug: @@ -195,7 +195,7 @@ def _start_parent_process_monitoring(self): self._monitor_active = True - def monitor_parent_process(): + def monitor_parent_process() -> None: """Monitor thread function that watches the parent process.""" if self.debug: print(f"Starting parent process monitor for PID {self._parent_pid}") @@ -229,7 +229,7 @@ def monitor_parent_process(): self._monitor_thread = threading.Thread(target=monitor_parent_process, daemon=True) self._monitor_thread.start() - def _kill_rerun_processes(self): + def _kill_rerun_processes(self) -> None: """Enhanced method to kill Rerun viewer processes using psutil.""" try: if PSUTIL_AVAILABLE: diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_cube_stack.py 
b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_cube_stack.py index 44f71fc99e1..bf6eabdb057 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_cube_stack.py +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_cube_stack.py @@ -25,6 +25,7 @@ import gymnasium as gym import torch +from collections.abc import Generator import isaaclab.utils.math as math_utils from isaaclab.assets import Articulation, RigidObject @@ -86,7 +87,7 @@ def _execute_gripper_action( @pytest.fixture(scope="class") -def cube_stack_test_env(): +def cube_stack_test_env() -> Generator[dict[str, Any], None, None]: """Create the environment and motion planner once for the test suite and yield them.""" random.seed(SEED) torch.manual_seed(SEED) @@ -140,7 +141,7 @@ def cube_stack_test_env(): class TestCubeStackPlanner: @pytest.fixture(autouse=True) - def setup(self, cube_stack_test_env): + def setup(self, cube_stack_test_env) -> None: self.env: ManagerBasedEnv = cube_stack_test_env["env"] self.robot: Articulation = cube_stack_test_env["robot"] self.planner: CuroboPlanner = cube_stack_test_env["planner"] diff --git a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_franka.py b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_franka.py index 465961456a2..2925f5d836f 100644 --- a/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_franka.py +++ b/source/isaaclab_mimic/isaaclab_mimic/motion_planners/curobo/test/test_curobo_planner_franka.py @@ -4,6 +4,7 @@ # SPDX-License-Identifier: Apache-2.0 import random +from collections.abc import Generator from typing import Any import pytest @@ -47,7 +48,7 @@ @pytest.fixture(scope="class") -def curobo_test_env(): +def curobo_test_env() -> Generator[dict[str, Any], None, None]: """Set up the environment for the Curobo test and yield test-critical data.""" random.seed(SEED) torch.manual_seed(SEED) @@ -107,7 +108,7 @@ class TestCuroboPlanner: """Test suite for the Curobo motion planner, focusing on obstacle avoidance.""" @pytest.fixture(autouse=True) - def setup(self, curobo_test_env): + def setup(self, curobo_test_env) -> None: """Inject the test environment into the test class instance.""" self.env: ManagerBasedEnv = curobo_test_env["env"] self.robot: Articulation = curobo_test_env["robot"] @@ -115,7 +116,7 @@ def setup(self, curobo_test_env): self.goal_pose_visualizer: VisualizationMarkers | None = curobo_test_env["goal_pose_visualizer"] self.home_j: torch.Tensor = curobo_test_env["home_j"] - def _visualize_goal_pose(self, pos: torch.Tensor, quat: torch.Tensor): + def _visualize_goal_pose(self, pos: torch.Tensor, quat: torch.Tensor) -> None: """Visualize the goal pose using frame markers if not in headless mode.""" if headless or self.goal_pose_visualizer is None: return @@ -123,7 +124,7 @@ def _visualize_goal_pose(self, pos: torch.Tensor, quat: torch.Tensor): quat_vis = quat.unsqueeze(0) self.goal_pose_visualizer.visualize(translations=pos_vis, orientations=quat_vis) - def _execute_current_plan(self): + def _execute_current_plan(self) -> None: """Replay the waypoints of the current plan in the simulator for visualization.""" if headless or self.planner.current_plan is None: return @@ -132,7 +133,7 @@ def _execute_current_plan(self): self._set_arm_positions(q_tensor) self.env.sim.step(render=True) - def _set_arm_positions(self, q: torch.Tensor): + def 
_set_arm_positions(self, q: torch.Tensor) -> None: """Set the joint positions of the robot's arm, appending default gripper values if necessary.""" if q.dim() == 1: q = q.unsqueeze(0) @@ -144,7 +145,7 @@ def _set_arm_positions(self, q: torch.Tensor): self.robot.write_joint_position_to_sim(q_full) @pytest.mark.parametrize("goal_spec, goal_id", predefined_ee_goals_and_ids) - def test_plan_to_predefined_goal(self, goal_spec, goal_id): + def test_plan_to_predefined_goal(self, goal_spec, goal_id) -> None: """Test planning to a predefined goal, ensuring the planner can find a path around an obstacle.""" print(f"Planning for goal: {goal_id}")
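
A note on the _visualize_target_pose hunk earlier in this series: the removed code derived a Z-axis angle from a single rotation-matrix entry, which cannot represent a general orientation, whereas logging the full matrix is exact. Below is a minimal sketch of the corrected Rerun logging pattern; it assumes a recording has already been initialized, and the pos_np/rot_np values are illustrative placeholders rather than planner output.

    import numpy as np
    import rerun as rr

    # Illustrative placeholders: in the planner these come from the target pose
    pos_np = np.array([0.4, 0.0, 0.3])  # (3,) translation
    rot_np = np.eye(3)                  # (3, 3) rotation matrix

    # Logging the full matrix is exact for any orientation, unlike the removed
    # axis-angle code that only made sense for a rotation about Z.
    rr.log("world/target/frame", rr.Transform3D(translation=pos_np, mat3x3=rot_np))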
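The _linearly_retime_plan hunks above all serve one technique: drop near-duplicate waypoints, accumulate joint-space arc length, and resample the path at evenly spaced distances. The following is a self-contained sketch of that idea for a plain (N, D) waypoint tensor; the linearly_retime name and signature are illustrative, not the planner's actual API.

    import torch

    def linearly_retime(path: torch.Tensor, num_steps: int) -> torch.Tensor:
        """Resample an (N, D) joint-space path at uniform arc-length spacing (illustrative)."""
        # Drop consecutive near-duplicate waypoints so arc length strictly increases
        deltas = path[1:] - path[:-1]
        distances = torch.norm(deltas, dim=-1)
        waypoints = torch.cat([path[:1], path[1:][distances > 1e-6]], dim=0)
        if len(waypoints) < 2:
            return path
        # Cumulative arc length along the de-duplicated path
        seg = torch.norm(waypoints[1:] - waypoints[:-1], dim=-1)
        cum = torch.cat([torch.zeros(1, device=path.device), torch.cumsum(seg, dim=0)])
        if cum[-1] < 1e-6:
            return path
        # Sample uniformly in arc length and linearly interpolate between brackets
        samples = torch.linspace(0.0, cum[-1].item(), num_steps, device=path.device)
        idx = torch.searchsorted(cum, samples, right=True).clamp(1, len(cum) - 1)
        alpha = ((samples - cum[idx - 1]) / (cum[idx] - cum[idx - 1]).clamp_min(1e-9)).unsqueeze(-1)
        return waypoints[idx - 1] * (1 - alpha) + waypoints[idx] * alpha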
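Taken together, the planner surface exercised by these changes — update_world_and_plan_motion, get_next_waypoint_ee_pose (which raises IndexError once the plan is exhausted), reset_plan, and the new static_objects config field — suggests a driver loop along the lines below. This is a sketch under assumptions: the CuroboPlanner constructor arguments and the execute_planned_motion helper are invented for illustration, and only the method names and the franka_stack_cube_config factory appear in the patch.

    import torch

    from isaaclab_mimic.motion_planners.curobo.curobo_planner import CuroboPlanner
    from isaaclab_mimic.motion_planners.curobo.curobo_planner_config import CuroboPlannerConfig

    def execute_planned_motion(env, planner: CuroboPlanner, target_pose: torch.Tensor) -> bool:
        """Plan to target_pose and walk the resulting waypoints (illustrative helper)."""
        # Syncs object poses into the collision world, then plans; False on failure
        if not planner.update_world_and_plan_motion(target_pose):
            return False
        while True:
            try:
                ee_pose = planner.get_next_waypoint_ee_pose()  # IndexError when exhausted
            except IndexError:
                break
            # ... convert ee_pose into an env action and step the simulation ...
        planner.reset_plan()  # clears the plan and any Rerun plan visualization
        return True

    # Assumed usage; shown only to illustrate the new config knob.
    config = CuroboPlannerConfig.franka_stack_cube_config()
    config.static_objects = ["table"]  # objects skipped during per-step pose sync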