From 620ce2b08dc905d0a2b46e986b9da6adbe1f4637 Mon Sep 17 00:00:00 2001 From: jsmith-bdai <142246516+jsmith-bdai@users.noreply.github.com> Date: Wed, 10 Jan 2024 23:12:33 -0500 Subject: [PATCH] Fixes source frame indexing in FrameTransfomer sensor (#350) # Description The FrameTransformer sensor always assumed that the source frame index is 0 in the parsed regex expression. However, a recent issue appeared where this is not the case, and it led to a bug due to the hard-coded value. This MR fixes the source frame index in the sensor and also adds a unit test to check this behavior. Fixes: https://github.com/NVIDIA-Omniverse/Orbit/issues/170 ## Type of change - Bug fix (non-breaking change which fixes an issue) ## Checklist - [x] I have run the [`pre-commit` checks](https://pre-commit.com/) with `./orbit.sh --format` - [x] I have made corresponding changes to the documentation - [x] My changes generate no new warnings - [x] I have added tests that prove my fix is effective or that my feature works - [x] I have run all the tests with `./orbit.sh --test` and they pass - [x] I have updated the changelog and the corresponding version in the extension's `config/extension.toml` file - [x] I have added my name to the `CONTRIBUTORS.md` or my name already exists there --------- Co-authored-by: Mayank Mittal --- .../omni.isaac.orbit/config/extension.toml | 2 +- .../omni.isaac.orbit/docs/CHANGELOG.rst | 21 ++ .../frame_transformer/frame_transformer.py | 165 ++++----- .../frame_transformer_data.py | 48 ++- .../test/sensors/check_frame_transformer.py | 260 -------------- .../test/sensors/test_frame_transformer.py | 319 ++++++++++++++++++ .../state_machine/lift_cube_sm.py | 2 +- .../04_sensors/run_frame_transformer.py | 24 +- .../tutorials/04_sensors/run_usd_camera.py | 6 +- 9 files changed, 491 insertions(+), 356 deletions(-) delete mode 100644 source/extensions/omni.isaac.orbit/test/sensors/check_frame_transformer.py create mode 100644 source/extensions/omni.isaac.orbit/test/sensors/test_frame_transformer.py diff --git a/source/extensions/omni.isaac.orbit/config/extension.toml b/source/extensions/omni.isaac.orbit/config/extension.toml index 17ce4be821..3a0ffde5da 100644 --- a/source/extensions/omni.isaac.orbit/config/extension.toml +++ b/source/extensions/omni.isaac.orbit/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.10.11" +version = "0.10.12" # Description title = "ORBIT framework for Robot Learning" diff --git a/source/extensions/omni.isaac.orbit/docs/CHANGELOG.rst b/source/extensions/omni.isaac.orbit/docs/CHANGELOG.rst index 2afb582d6c..bfd2941fb2 100644 --- a/source/extensions/omni.isaac.orbit/docs/CHANGELOG.rst +++ b/source/extensions/omni.isaac.orbit/docs/CHANGELOG.rst @@ -1,6 +1,27 @@ Changelog --------- +0.10.12 (2024-01-10) +~~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed indexing of source and target frames in the :class:`omni.isaac.orbit.sensors.FrameTransformer` class. + Earlier, it always assumed that the source frame body is at index 0. Now, it uses the body index of the + source frame to compute the transformation. + +Deprecated +^^^^^^^^^^ + +* Renamed quantities in the :class:`omni.isaac.orbit.sensors.FrameTransformerData` class to be more + consistent with the terminology used in the asset classes. 
The following quantities are deprecated: + + * ``target_rot_w`` -> ``target_quat_w`` + * ``source_rot_w`` -> ``source_quat_w`` + * ``target_rot_source`` -> ``target_quat_source`` + + 0.10.11 (2024-01-08) ~~~~~~~~~~~~~~~~~~~~ diff --git a/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer.py b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer.py index 3aba43dde9..2c14a88e06 100644 --- a/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer.py +++ b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer.py @@ -49,11 +49,20 @@ class FrameTransformer(SensorBase): typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the manipulator. - Note: + .. note:: + Currently, this implementation only handles frames within an articulation. This is because the frame regex expressions are resolved based on their parent prim path. This can be extended to handle frames outside of articulation by using the frame prim path instead. However, this would require - additional checks to ensure that the user-specified frames are valid. + additional checks to ensure that the user-specified frames are valid which is not currently implemented. + + .. warning:: + + The implementation assumes that the parent body of a target frame is not the same as that + of the source frame (i.e. :attr:`FrameTransformerCfg.prim_path`). While a corner case, this can occur + if the user specifies the same prim path for both the source frame and target frame. In this case, + the target frame will be ignored and not reported. This is a limitation of the current implementation + and will be fixed in a future release. 
""" @@ -75,10 +84,10 @@ def __str__(self) -> str: """Returns: A string containing information about the instance.""" return ( f"FrameTransformer @ '{self.cfg.prim_path}': \n" - f"\ttracked body frames: {self._tracked_body_names} \n" + f"\ttracked body frames: {[self._source_frame_body_name] + self._target_frame_body_names} \n" f"\tnumber of envs: {self._num_envs}\n" - f"\tsource frame: {self._tracked_body_names[0]}\n" - f"\ttarget frames: {self._target_frame_names} count: {len(self._target_frame_names)}\n" + f"\tsource body frame: {self._source_frame_body_name}\n" + f"\ttarget frames (count: {self._target_frame_names}): {len(self._target_frame_names)}\n" ) """ @@ -112,29 +121,27 @@ def _initialize_impl(self): # resolve source frame offset source_frame_offset_pos = torch.tensor(self.cfg.source_frame_offset.pos, device=self.device) - source_frame_offset_rot = torch.tensor(self.cfg.source_frame_offset.rot, device=self.device) + source_frame_offset_quat = torch.tensor(self.cfg.source_frame_offset.rot, device=self.device) # Only need to perform offsetting of source frame if the position offsets is non-zero and rotation offset is # not the identity quaternion for efficiency in _update_buffer_impl self._apply_source_frame_offset = True # Handle source frame offsets - if is_identity_pose(source_frame_offset_pos, source_frame_offset_rot): - carb.log_verbose(f"Not applying offset for source frame as it is identity: {self.cfg.prim_path}") + if is_identity_pose(source_frame_offset_pos, source_frame_offset_quat): + carb.log_verbose(f"No offset application needed for source frame as it is identity: {self.cfg.prim_path}") self._apply_source_frame_offset = False else: - carb.log_verbose(f"Applying offset for source frame as it is not identity: {self.cfg.prim_path}") + carb.log_verbose(f"Applying offset to source frame as it is not identity: {self.cfg.prim_path}") # Store offsets as tensors (duplicating each env's offsets for ease of multiplication later) self._source_frame_offset_pos = source_frame_offset_pos.unsqueeze(0).repeat(self._num_envs, 1) - self._source_frame_offset_rot = source_frame_offset_rot.unsqueeze(0).repeat(self._num_envs, 1) - - # The offsets associated with each target frame - target_offsets = {} + self._source_frame_offset_quat = source_frame_offset_quat.unsqueeze(0).repeat(self._num_envs, 1) # Keep track of mapping from the rigid body name to the desired frame, as there may be multiple frames # based upon the same body name and we don't want to create unnecessary views - body_names_to_frames = {} - + body_names_to_frames: dict[str, set[str]] = {} + # The offsets associated with each target frame + target_offsets: dict[str, dict[str, torch.Tensor]] = {} # The frames whose offsets are not identity - non_identity_offset_frames = [] + non_identity_offset_frames: list[str] = [] # Only need to perform offsetting of target frame if any of the position offsets are non-zero or any of the # rotation offsets are not the identity quaternion for efficiency in _update_buffer_impl @@ -145,7 +152,7 @@ def _initialize_impl(self): # First element is None because user can't specify source frame name frames = [None] + [target_frame.name for target_frame in self.cfg.target_frames] frame_prim_paths = [self.cfg.prim_path] + [target_frame.prim_path for target_frame in self.cfg.target_frames] - # First element is None because source frame offset is handled above + # First element is None because source frame offset is handled separately frame_offsets = [None] + [target_frame.offset for target_frame in 
self.cfg.target_frames] for frame, prim_path, offset in zip(frames, frame_prim_paths, frame_offsets): # Find correct prim @@ -158,21 +165,19 @@ def _initialize_impl(self): for prim in matching_prims: # Get the prim path of the matching prim matching_prim_path = prim.GetPath().pathString - # check if it is a rigid prim + # Check if it is a rigid prim if not prim.HasAPI(UsdPhysics.RigidBodyAPI): raise ValueError( f"While resolving expression '{prim_path}' found a prim '{matching_prim_path}' which is not a" " rigid body. The class only supports transformations between rigid bodies." ) + # Get the name of the body body_name = matching_prim_path.rsplit("/", 1)[-1] + # Use body name if frame isn't specified by user + frame_name = frame if frame is not None else body_name - # Use body_name if frame isn't specified by user - if frame is None: - frame_name = body_name - else: - frame_name = frame - + # Keep track of which frames are associated with which bodies if body_name in body_names_to_frames: body_names_to_frames[body_name].add(frame_name) else: @@ -180,42 +185,31 @@ def _initialize_impl(self): if offset is not None: offset_pos = torch.tensor(offset.pos, device=self.device) - offset_rot = torch.tensor(offset.rot, device=self.device) - # Check if we need to apply offsets - if not is_identity_pose(offset_pos, offset_rot): + offset_quat = torch.tensor(offset.rot, device=self.device) + # Check if we need to apply offsets (optimized code path in _update_buffer_impl) + if not is_identity_pose(offset_pos, offset_quat): non_identity_offset_frames.append(frame_name) self._apply_target_frame_offset = True - target_offsets[frame_name] = {"pos": offset_pos, "rot": offset_rot} + target_offsets[frame_name] = {"pos": offset_pos, "quat": offset_quat} if not self._apply_target_frame_offset: carb.log_info( - f"Not applying offset from {self.cfg.prim_path} to target frames as all are identity: {frames}" + f"No offsets application needed from '{self.cfg.prim_path}' to target frames as all" + f" are identity: {frames[1:]}" ) else: carb.log_info( - f"Applying offset from {self.cfg.prim_path} as the following frames are non-identity:" + f"Offsets application needed from '{self.cfg.prim_path}' to the following target frames:" f" {non_identity_offset_frames}" ) # The names of bodies that RigidPrimView will be tracking to later extract transforms from - self._tracked_body_names = list(body_names_to_frames.keys()) - num_tracked_bodies = len(self._tracked_body_names) - - # The number of target body frames being tracked by RigidPrimView. 
Subtract one to remove source frame from - # count of frames - self._num_target_body_frames = num_tracked_bodies - 1 - - # Determine indices into all tracked body frames for both source and target frames - all_idxs = torch.arange(self._num_envs * num_tracked_bodies) - self._source_frame_idxs = torch.arange(self._num_envs) * num_tracked_bodies - self._target_frame_idxs = all_idxs[~torch.isin(all_idxs, self._source_frame_idxs)] - + tracked_body_names = list(body_names_to_frames.keys()) # Construct regex expression for the body names - body_names_regex = r"(" + "|".join(self._tracked_body_names) + r")" + body_names_regex = r"(" + "|".join(tracked_body_names) + r")" body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}" - - # create simulation view + # Create simulation view self._physics_sim_view = physx.create_simulation_view(self._backend) self._physics_sim_view.set_subspace_roots("/") # Create a prim view for all frames and initialize it @@ -226,19 +220,28 @@ def _initialize_impl(self): # by frame name correctly all_prim_paths = self._frame_physx_view.prim_paths - # Only need first env as the names and their orderring are the same across environments - first_env_prim_paths = all_prim_paths[0 : self._num_target_body_frames + 1] + # Only need first env as the names and their ordering are the same across environments + first_env_prim_paths = all_prim_paths[0 : len(tracked_body_names)] first_env_body_names = [first_env_prim_path.split("/")[-1] for first_env_prim_path in first_env_prim_paths] - target_frame_body_names = first_env_body_names[1:] + # Re-parse the list as it may have moved when resolving regex above + # -- source frame + self._source_frame_body_name = self.cfg.prim_path.split("/")[-1] + source_frame_index = first_env_body_names.index(self._source_frame_body_name) + # -- target frames + self._target_frame_body_names = first_env_body_names[:] + self._target_frame_body_names.remove(self._source_frame_body_name) - # The position and rotation components of target frame offsets - target_frame_offset_pos = [] - target_frame_offset_rot = [] + # Determine indices into all tracked body frames for both source and target frames + all_ids = torch.arange(self._num_envs * len(tracked_body_names)) + self._source_frame_body_ids = torch.arange(self._num_envs) * len(tracked_body_names) + source_frame_index + self._target_frame_body_ids = all_ids[~torch.isin(all_ids, self._source_frame_body_ids)] # The name of each of the target frame(s) - either user specified or defaulted to the body name - self._target_frame_names = [] - + self._target_frame_names: list[str] = [] + # The position and rotation components of target frame offsets + target_frame_offset_pos = [] + target_frame_offset_quat = [] # Stores the indices of bodies that need to be duplicated. For instance, if body "LF_SHANK" is needed # for 2 frames, this list enables us to duplicate the body to both frames when doing the calculations # when updating sensor in _update_buffers_impl @@ -247,33 +250,33 @@ def _initialize_impl(self): # Go through each body name and determine the number of duplicates we need for that frame # and extract the offsets. 
This is all done to handles the case where multiple frames # reference the same body, but have different names and/or offsets - for i, body_name in enumerate(target_frame_body_names): + for i, body_name in enumerate(self._target_frame_body_names): for frame in body_names_to_frames[body_name]: target_frame_offset_pos.append(target_offsets[frame]["pos"]) - target_frame_offset_rot.append(target_offsets[frame]["rot"]) + target_frame_offset_quat.append(target_offsets[frame]["quat"]) self._target_frame_names.append(frame) duplicate_frame_indices.append(i) - duplicate_frame_indices = torch.tensor(duplicate_frame_indices, device=self.device) - # To handle multiple environments, need to expand so [0, 1, 1, 2] with 2 environments becomes # [0, 1, 1, 2, 3, 4, 4, 5]. Again, this is a optimization to make _update_buffer_impl more efficient + duplicate_frame_indices = torch.tensor(duplicate_frame_indices, device=self.device) + num_target_body_frames = len(tracked_body_names) - 1 self._duplicate_frame_indices = torch.cat( - [duplicate_frame_indices + self._num_target_body_frames * env_num for env_num in range(self._num_envs)] + [duplicate_frame_indices + num_target_body_frames * env_num for env_num in range(self._num_envs)] ) - # Stack up all the frame offsets + # Stack up all the frame offsets for shape (num_envs, num_frames, 3) and (num_envs, num_frames, 4) self._target_frame_offset_pos = torch.stack(target_frame_offset_pos).repeat(self._num_envs, 1) - self._target_frame_offset_rot = torch.stack(target_frame_offset_rot).repeat(self._num_envs, 1) + self._target_frame_offset_quat = torch.stack(target_frame_offset_quat).repeat(self._num_envs, 1) # fill the data buffer self._data.target_frame_names = self._target_frame_names self._data.source_pos_w = torch.zeros(self._num_envs, 3, device=self._device) - self._data.source_rot_w = torch.zeros(self._num_envs, 4, device=self._device) + self._data.source_quat_w = torch.zeros(self._num_envs, 4, device=self._device) self._data.target_pos_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 3, device=self._device) - self._data.target_rot_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 4, device=self._device) - self._data.target_pos_source = torch.zeros(self._num_envs, len(duplicate_frame_indices), 3, device=self._device) - self._data.target_rot_source = torch.zeros(self._num_envs, len(duplicate_frame_indices), 4, device=self._device) + self._data.target_quat_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 4, device=self._device) + self._data.target_pos_source = torch.zeros_like(self._data.target_pos_w) + self._data.target_quat_source = torch.zeros_like(self._data.target_quat_w) def _update_buffers_impl(self, env_ids: Sequence[int]): """Fills the buffers of the sensor data.""" @@ -288,52 +291,52 @@ def _update_buffers_impl(self, env_ids: Sequence[int]): transforms[:, 3:] = convert_quat(transforms[:, 3:], to="wxyz") # Process source frame transform - source_frames = transforms[self._source_frame_idxs] + source_frames = transforms[self._source_frame_body_ids] # Only apply offset if the offsets will result in a coordinate frame transform if self._apply_source_frame_offset: - source_pos_w, source_rot_w = combine_frame_transforms( + source_pos_w, source_quat_w = combine_frame_transforms( source_frames[:, :3], source_frames[:, 3:], self._source_frame_offset_pos, - self._source_frame_offset_rot, + self._source_frame_offset_quat, ) else: source_pos_w = source_frames[:, :3] - source_rot_w = source_frames[:, 3:] + source_quat_w = 
source_frames[:, 3:] # Process target frame transforms - target_frames = transforms[self._target_frame_idxs] + target_frames = transforms[self._target_frame_body_ids] duplicated_target_frame_pos_w = target_frames[self._duplicate_frame_indices, :3] - duplicated_target_frame_rot_w = target_frames[self._duplicate_frame_indices, 3:] + duplicated_target_frame_quat_w = target_frames[self._duplicate_frame_indices, 3:] # Only apply offset if the offsets will result in a coordinate frame transform if self._apply_target_frame_offset: - target_pos_w, target_rot_w = combine_frame_transforms( + target_pos_w, target_quat_w = combine_frame_transforms( duplicated_target_frame_pos_w, - duplicated_target_frame_rot_w, + duplicated_target_frame_quat_w, self._target_frame_offset_pos, - self._target_frame_offset_rot, + self._target_frame_offset_quat, ) else: target_pos_w = duplicated_target_frame_pos_w - target_rot_w = duplicated_target_frame_rot_w + target_quat_w = duplicated_target_frame_quat_w # Compute the transform of the target frame with respect to the source frame total_num_frames = len(self._target_frame_names) - target_pos_source, target_rot_source = subtract_frame_transforms( + target_pos_source, target_quat_source = subtract_frame_transforms( source_pos_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 3), - source_rot_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 4), + source_quat_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 4), target_pos_w, - target_rot_w, + target_quat_w, ) # Update buffers # note: The frame names / ordering don't change so no need to update them after initialization self._data.source_pos_w[:] = source_pos_w.view(-1, 3) - self._data.source_rot_w[:] = source_rot_w.view(-1, 4) + self._data.source_quat_w[:] = source_quat_w.view(-1, 4) self._data.target_pos_w[:] = target_pos_w.view(-1, total_num_frames, 3) - self._data.target_rot_w[:] = target_rot_w.view(-1, total_num_frames, 4) + self._data.target_quat_w[:] = target_quat_w.view(-1, total_num_frames, 4) self._data.target_pos_source[:] = target_pos_source.view(-1, total_num_frames, 3) - self._data.target_rot_source[:] = target_rot_source.view(-1, total_num_frames, 4) + self._data.target_quat_source[:] = target_quat_source.view(-1, total_num_frames, 4) def _set_debug_vis_impl(self, debug_vis: bool): # set visibility of markers @@ -350,7 +353,7 @@ def _set_debug_vis_impl(self, debug_vis: bool): def _debug_vis_callback(self, event): # Update the visualized markers if self.frame_visualizer is not None: - self.frame_visualizer.visualize(self._data.target_pos_w.view(-1, 3), self._data.target_rot_w.view(-1, 4)) + self.frame_visualizer.visualize(self._data.target_pos_w.view(-1, 3), self._data.target_quat_w.view(-1, 4)) """ Internal simulation callbacks. 
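The crux of the fix in `frame_transformer.py` above is that the source body's flat index into the physics view is now derived from its actual position in the tracked-body list instead of being hard-coded to 0. A minimal standalone sketch of that indexing logic, using hypothetical body names and plain PyTorch (this is an illustration, not the sensor class itself):

```python
import torch

# Hypothetical setup: 2 environments, three tracked bodies per environment in the
# order produced by the regex resolution. The source body is NOT the first entry.
num_envs = 2
tracked_body_names = ["LF_SHANK", "LF_THIGH", "RF_SHANK"]  # hypothetical order
source_body_name = "LF_THIGH"

# Index of the source body within one environment's tracked bodies (1 here, not assumed 0).
source_frame_index = tracked_body_names.index(source_body_name)

# Flat row indices into the (num_envs * num_bodies) transforms returned by the prim view.
all_ids = torch.arange(num_envs * len(tracked_body_names))
source_frame_body_ids = torch.arange(num_envs) * len(tracked_body_names) + source_frame_index
target_frame_body_ids = all_ids[~torch.isin(all_ids, source_frame_body_ids)]

print(source_frame_body_ids.tolist())  # [1, 4]
print(target_frame_body_ids.tolist())  # [0, 2, 3, 5]
```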
diff --git a/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_data.py b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_data.py index 067869205e..8d9b9c1e20 100644 --- a/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_data.py +++ b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_data.py @@ -6,6 +6,7 @@ from __future__ import annotations import torch +import warnings from dataclasses import dataclass @@ -14,11 +15,11 @@ class FrameTransformerData: """Data container for the frame transformer sensor.""" target_frame_names: list[str] = None - """Target frame names (this denotes the order that frame data will be ordered). + """Target frame names (this denotes the order in which that frame data is ordered). The frame names are resolved from the :attr:`FrameTransformerCfg.FrameCfg.name` field. - This usually follows the order in which the frames are defined in the config. However, in the case of - regex matching, the order may be different. + This usually follows the order in which the frames are defined in the config. However, in + the case of regex matching, the order may be different. """ target_pos_source: torch.Tensor = None @@ -26,28 +27,63 @@ class FrameTransformerData: Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. """ - target_rot_source: torch.Tensor = None + + target_quat_source: torch.Tensor = None """Orientation of the target frame(s) relative to source frame quaternion (w, x, y, z). Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. """ + target_pos_w: torch.Tensor = None """Position of the target frame(s) after offset (in world frame). Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. """ - target_rot_w: torch.Tensor = None + + target_quat_w: torch.Tensor = None """Orientation of the target frame(s) after offset (in world frame) quaternion (w, x, y, z). Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. """ + source_pos_w: torch.Tensor = None """Position of the source frame after offset (in world frame). Shape is (N, 3), where N is the number of environments. """ - source_rot_w: torch.Tensor = None + + source_quat_w: torch.Tensor = None """Orientation of the source frame after offset (in world frame) quaternion (w, x, y, z). Shape is (N, 4), where N is the number of environments. """ + + @property + def target_rot_source(self) -> torch.Tensor: + """Alias for :attr:`target_quat_source`. + + .. deprecated:: v0.2.1 + Use :attr:`target_quat_source` instead. Will be removed in v0.3.0. + """ + warnings.warn("'target_rot_source' is deprecated, use 'target_quat_source' instead.", DeprecationWarning) + return self.target_quat_source + + @property + def target_rot_w(self) -> torch.Tensor: + """Alias for :attr:`target_quat_w`. + + .. deprecated:: v0.2.1 + Use :attr:`target_quat_w` instead. Will be removed in v0.3.0. + """ + warnings.warn("'target_rot_w' is deprecated, use 'target_quat_w' instead.", DeprecationWarning) + return self.target_quat_w + + @property + def source_rot_w(self) -> torch.Tensor: + """Alias for :attr:`source_quat_w`. + + .. deprecated:: v0.2.1 + Use :attr:`source_quat_w` instead. Will be removed in v0.3.0. 
+ """ + warnings.warn("'source_rot_w' is deprecated, use 'source_quat_w' instead.", DeprecationWarning) + return self.source_quat_w diff --git a/source/extensions/omni.isaac.orbit/test/sensors/check_frame_transformer.py b/source/extensions/omni.isaac.orbit/test/sensors/check_frame_transformer.py deleted file mode 100644 index 2b0c7b24b6..0000000000 --- a/source/extensions/omni.isaac.orbit/test/sensors/check_frame_transformer.py +++ /dev/null @@ -1,260 +0,0 @@ -# Copyright (c) 2022-2024, The ORBIT Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -""" -This script checks the FrameTransformer sensor by visualizing the frames that it creates. -""" - -from __future__ import annotations - -"""Launch Isaac Sim Simulator first.""" - - -import argparse -import math -from scipy.spatial.transform import Rotation - -from omni.isaac.orbit.app import AppLauncher - -# add argparse arguments -parser = argparse.ArgumentParser( - description="This script checks the FrameTransformer sensor by visualizing the frames that it creates." -) -parser.add_argument( - "--mode", - type=str, - choices=["feet", "all"], - default="all", - help="Dictates the the type of frames to use for FrameTransformer.", -) -parser.add_argument( - "--visualize", - action="store_true", - help=( - "Whether to enable FrameTransformer's debug_vis (True) or visualize each frame one at a" - " time and print to console (False)." - ), -) -parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.") -AppLauncher.add_app_launcher_args(parser) -args_cli = parser.parse_args() - -# launch omniverse app -app_launcher = AppLauncher(headless=args_cli.headless) -simulation_app = app_launcher.app - -"""Rest everything follows.""" - - -import omni.isaac.orbit.sim as sim_utils -from omni.isaac.orbit.assets import AssetBaseCfg -from omni.isaac.orbit.markers import VisualizationMarkers -from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG -from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg -from omni.isaac.orbit.sensors import FrameTransformerCfg, OffsetCfg -from omni.isaac.orbit.sim import SimulationContext -from omni.isaac.orbit.terrains import TerrainImporterCfg -from omni.isaac.orbit.utils import configclass -from omni.isaac.orbit.utils.timer import Timer - -## -# Pre-defined configs -## -from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG # isort:skip - - -def quat_from_euler_rpy(roll, pitch, yaw, degrees=False): - """Converts Euler XYZ to Quaternion (w, x, y, z).""" - quat = Rotation.from_euler("xyz", (roll, pitch, yaw), degrees=degrees).as_quat() - return tuple(quat[[3, 0, 1, 2]].tolist()) - - -def euler_rpy_apply(rpy, xyz, degrees=False): - """Applies rotation from Euler XYZ on position vector.""" - rot = Rotation.from_euler("xyz", rpy, degrees=degrees) - return tuple(rot.apply(xyz).tolist()) - - -@configclass -class MySceneCfg(InteractiveSceneCfg): - """Example scene configuration.""" - - # terrain - flat terrain plane - terrain = TerrainImporterCfg( - prim_path="/World/ground", - terrain_type="plane", - ) - - # articulation - robot - robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") - - if args_cli.mode == "feet": - # Example where only feet position frames are created - frame_transformer = FrameTransformerCfg( - prim_path="{ENV_REGEX_NS}/Robot/base", - target_frames=[ - FrameTransformerCfg.FrameCfg( - name="LF_FOOT", - prim_path="{ENV_REGEX_NS}/Robot/LF_SHANK", - offset=OffsetCfg( - pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 
2), xyz=(0.08795, 0.01305, -0.33797)), - rot=quat_from_euler_rpy(0, 0, -math.pi / 2), - ), - ), - FrameTransformerCfg.FrameCfg( - name="RF_FOOT", - prim_path="{ENV_REGEX_NS}/Robot/RF_SHANK", - offset=OffsetCfg( - pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(0.08795, -0.01305, -0.33797)), - rot=quat_from_euler_rpy(0, 0, math.pi / 2), - ), - ), - FrameTransformerCfg.FrameCfg( - name="LH_FOOT", - prim_path="{ENV_REGEX_NS}/Robot/LH_SHANK", - offset=OffsetCfg( - pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(-0.08795, 0.01305, -0.33797)), - rot=quat_from_euler_rpy(0, 0, -math.pi / 2), - ), - ), - FrameTransformerCfg.FrameCfg( - name="RH_FOOT", - prim_path="{ENV_REGEX_NS}/Robot/RH_SHANK", - offset=OffsetCfg( - pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(-0.08795, -0.01305, -0.33797)), - rot=quat_from_euler_rpy(0, 0, math.pi / 2), - ), - ), - ], - debug_vis=args_cli.visualize, - ) - - elif args_cli.mode == "all": - # Example using .* to get full body + LF_FOOT to ensure these work together - frame_transformer = FrameTransformerCfg( - prim_path="{ENV_REGEX_NS}/Robot/base", - target_frames=[ - FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/.*"), - FrameTransformerCfg.FrameCfg( - prim_path="{ENV_REGEX_NS}/Robot/LF_SHANK", - name="LF_FOOT", - offset=OffsetCfg( - pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(0.08795, 0.01305, -0.33797)), - rot=quat_from_euler_rpy(0, 0, -math.pi / 2), - ), - ), - ], - debug_vis=args_cli.visualize, - ) - - # extras - light - light = AssetBaseCfg( - prim_path="/World/light", - spawn=sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)), - init_state=AssetBaseCfg.InitialStateCfg(pos=(0.0, 0.0, 500.0)), - ) - - -def main(): - """Main function.""" - - # Load kit helper - sim = SimulationContext(sim_utils.SimulationCfg(dt=0.005)) - # Set main camera - sim.set_camera_view(eye=[5, 5, 5], target=[0.0, 0.0, 0.0]) - - # Spawn things into stage - with Timer("Setup scene"): - scene = InteractiveScene(MySceneCfg(num_envs=args_cli.num_envs, env_spacing=5.0, lazy_sensor_update=False)) - - # Play the simulator - with Timer("Time taken to play the simulator"): - sim.reset() - - # Now we are ready! - print("[INFO]: Setup complete...") - - # default joint targets - robot_actions = scene.articulations["robot"].data.default_joint_pos.clone() - # Define simulation stepping - sim_dt = sim.get_physics_dt() - sim_time = 0.0 - count = 0 - - # We only want one visualization at a time. This visualizer will be used - # to step through each frame so the user can verify that the correct frame - # is being visualized as the frame names are printing to console - if not args_cli.visualize: - cfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameVisualizerFromScript") - cfg.markers["frame"].scale = (0.05, 0.05, 0.05) - transform_visualizer = VisualizationMarkers(cfg) - else: - transform_visualizer = None - - frame_index = 0 - # Simulate physics - while simulation_app.is_running(): - # If simulation is stopped, then exit. - if sim.is_stopped(): - break - # If simulation is paused, then skip. 
- if not sim.is_playing(): - sim.step() - continue - # # reset - if count % 50 == 0: - # reset counters - sim_time = 0.0 - count = 0 - # reset root state - root_state = scene.articulations["robot"].data.default_root_state.clone() - root_state[:, :3] += scene.env_origins - joint_pos = scene.articulations["robot"].data.default_joint_pos - joint_vel = scene.articulations["robot"].data.default_joint_vel - # -- set root state - # -- robot - scene.articulations["robot"].write_root_state_to_sim(root_state) - scene.articulations["robot"].write_joint_state_to_sim(joint_pos, joint_vel) - root_state[:, 1] += 1.0 - # reset buffers - scene.reset() - print(">>>>>>>> Reset!") - # perform this loop at policy control freq (50 Hz) - for _ in range(4): - # set joint targets - scene.articulations["robot"].set_joint_position_target(robot_actions) - # write data to sim - scene.write_data_to_sim() - # perform step - sim.step() - # read data from sim - scene.update(sim_dt) - - # Change the frame that we are visualizing to ensure that frame names - # are correctly associated with the frames - if not args_cli.visualize: - if count % 50 == 0: - frame_names = scene["frame_transformer"].data.target_frame_names - - frame_name = frame_names[frame_index] - print(f"Displaying {frame_index}: {frame_name}") - frame_index += 1 - frame_index = frame_index % len(frame_names) - - # visualize frame - pos = scene["frame_transformer"].data.target_pos_w[:, frame_index] - rot = scene["frame_transformer"].data.target_rot_w[:, frame_index] - transform_visualizer.visualize(pos, rot) - - # update sim-time - sim_time += sim_dt * 4 - count += 1 - - -if __name__ == "__main__": - # Run the main function - main() - # Close the simulator - simulation_app.close() diff --git a/source/extensions/omni.isaac.orbit/test/sensors/test_frame_transformer.py b/source/extensions/omni.isaac.orbit/test/sensors/test_frame_transformer.py new file mode 100644 index 0000000000..44db8b3d62 --- /dev/null +++ b/source/extensions/omni.isaac.orbit/test/sensors/test_frame_transformer.py @@ -0,0 +1,319 @@ +# Copyright (c) 2022-2024, The ORBIT Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +This script checks the FrameTransformer sensor by visualizing the frames that it creates. 
+""" + +from __future__ import annotations + +"""Launch Isaac Sim Simulator first.""" + +from omni.isaac.orbit.app import AppLauncher + +# launch omniverse app +app_launcher = AppLauncher(headless=True) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import math +import scipy.spatial.transform as tf +import torch +import traceback +import unittest + +import carb +import omni.isaac.core.utils.stage as stage_utils + +import omni.isaac.orbit.sim as sim_utils +import omni.isaac.orbit.utils.math as math_utils +from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg +from omni.isaac.orbit.sensors import FrameTransformerCfg, OffsetCfg +from omni.isaac.orbit.terrains import TerrainImporterCfg +from omni.isaac.orbit.utils import configclass + +## +# Pre-defined configs +## +from omni.isaac.orbit_assets.anymal import ANYMAL_C_CFG # isort:skip + + +def quat_from_euler_rpy(roll, pitch, yaw, degrees=False): + """Converts Euler XYZ to Quaternion (w, x, y, z).""" + quat = tf.Rotation.from_euler("xyz", (roll, pitch, yaw), degrees=degrees).as_quat() + return tuple(quat[[3, 0, 1, 2]].tolist()) + + +def euler_rpy_apply(rpy, xyz, degrees=False): + """Applies rotation from Euler XYZ on position vector.""" + rot = tf.Rotation.from_euler("xyz", rpy, degrees=degrees) + return tuple(rot.apply(xyz).tolist()) + + +@configclass +class MySceneCfg(InteractiveSceneCfg): + """Example scene configuration.""" + + # terrain - flat terrain plane + terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane") + + # articulation - robot + robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # sensors - frame transformer (filled inside unit test) + frame_transformer: FrameTransformerCfg = None + + +class TestFrameTransformer(unittest.TestCase): + """Test for frame transformer sensor.""" + + def setUp(self): + """Create a blank new stage for each test.""" + # Create a new stage + stage_utils.create_new_stage() + # Load kit helper + self.sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.005)) + # Set main camera + self.sim.set_camera_view(eye=[5, 5, 5], target=[0.0, 0.0, 0.0]) + + def tearDown(self): + """Stops simulator after each test.""" + # stop simulation + # self.sim.stop() + # clear the stage + self.sim.clear_all_callbacks() + self.sim.clear_instance() + + """ + Tests + """ + + def test_frame_transformer_feet_wrt_base(self): + """Test feet transformations w.r.t. base source frame. + + In this test, the source frame is the robot base. This frame is at index 0, when + the frame bodies are sorted in the order of the regex matching in the frame transformer. 
+ """ + # Spawn things into stage + scene_cfg = MySceneCfg(num_envs=32, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg.frame_transformer = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg( + name="LF_FOOT_USER", + prim_path="{ENV_REGEX_NS}/Robot/LF_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(0.08795, 0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, -math.pi / 2), + ), + ), + FrameTransformerCfg.FrameCfg( + name="RF_FOOT_USER", + prim_path="{ENV_REGEX_NS}/Robot/RF_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(0.08795, -0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, math.pi / 2), + ), + ), + FrameTransformerCfg.FrameCfg( + name="LH_FOOT_USER", + prim_path="{ENV_REGEX_NS}/Robot/LH_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(-0.08795, 0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, -math.pi / 2), + ), + ), + FrameTransformerCfg.FrameCfg( + name="RH_FOOT_USER", + prim_path="{ENV_REGEX_NS}/Robot/RH_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(-0.08795, -0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, math.pi / 2), + ), + ), + ], + ) + scene = InteractiveScene(scene_cfg) + + # Play the simulator + self.sim.reset() + + # Acquire the index of ground truth bodies + feet_indices, feet_names = scene.articulations["robot"].find_bodies( + ["LF_FOOT", "RF_FOOT", "LH_FOOT", "RH_FOOT"] + ) + # Check names are parsed the same order + user_feet_names = [f"{name}_USER" for name in feet_names] + self.assertListEqual(scene.sensors["frame_transformer"].data.target_frame_names, user_feet_names) + + # default joint targets + default_actions = scene.articulations["robot"].data.default_joint_pos.clone() + # Define simulation stepping + sim_dt = self.sim.get_physics_dt() + # Simulate physics + for count in range(100): + # # reset + if count % 25 == 0: + # reset root state + root_state = scene.articulations["robot"].data.default_root_state.clone() + root_state[:, :3] += scene.env_origins + joint_pos = scene.articulations["robot"].data.default_joint_pos + joint_vel = scene.articulations["robot"].data.default_joint_vel + # -- set root state + # -- robot + scene.articulations["robot"].write_root_state_to_sim(root_state) + scene.articulations["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + # reset buffers + scene.reset() + + # set joint targets + robot_actions = default_actions + 0.5 * torch.randn_like(default_actions) + scene.articulations["robot"].set_joint_position_target(robot_actions) + # write data to sim + scene.write_data_to_sim() + # perform step + self.sim.step() + # read data from sim + scene.update(sim_dt) + + # check absolute frame transforms in world frame + # -- ground-truth + root_pose_w = scene.articulations["robot"].data.root_state_w[:, :7] + feet_pos_w_gt = scene.articulations["robot"].data.body_pos_w[:, feet_indices] + feet_quat_w_gt = scene.articulations["robot"].data.body_quat_w[:, feet_indices] + # -- frame transformer + source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w + source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w + feet_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w + feet_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w + # check if they are same + torch.testing.assert_close(root_pose_w[:, :3], source_pos_w_tf, rtol=1e-3, atol=1e-3) + torch.testing.assert_close(root_pose_w[:, 
3:], source_quat_w_tf, rtol=1e-3, atol=1e-3) + torch.testing.assert_close(feet_pos_w_gt, feet_pos_w_tf, rtol=1e-3, atol=1e-3) + torch.testing.assert_close(feet_quat_w_gt, feet_quat_w_tf, rtol=1e-3, atol=1e-3) + + # check if relative transforms are same + feet_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source + feet_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source + for index in range(len(feet_indices)): + # ground-truth + foot_pos_b, foot_quat_b = math_utils.subtract_frame_transforms( + root_pose_w[:, :3], root_pose_w[:, 3:], feet_pos_w_tf[:, index], feet_quat_w_tf[:, index] + ) + # check if they are same + torch.testing.assert_close(feet_pos_source_tf[:, index], foot_pos_b, rtol=1e-3, atol=1e-3) + torch.testing.assert_close(feet_quat_source_tf[:, index], foot_quat_b, rtol=1e-3, atol=1e-3) + + def test_frame_transformer_feet_wrt_thigh(self): + """Test feet transformation w.r.t. thigh source frame. + + In this test, the source frame is the LF leg's thigh frame. This frame is not at index 0, + when the frame bodies are sorted in the order of the regex matching in the frame transformer. + """ + # Spawn things into stage + scene_cfg = MySceneCfg(num_envs=32, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg.frame_transformer = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/LF_THIGH", + target_frames=[ + FrameTransformerCfg.FrameCfg( + name="LF_FOOT_USER", + prim_path="{ENV_REGEX_NS}/Robot/LF_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(0.08795, 0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, -math.pi / 2), + ), + ), + FrameTransformerCfg.FrameCfg( + name="RF_FOOT_USER", + prim_path="{ENV_REGEX_NS}/Robot/RF_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(0.08795, -0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, math.pi / 2), + ), + ), + ], + ) + scene = InteractiveScene(scene_cfg) + + # Play the simulator + self.sim.reset() + + # Acquire the index of ground truth bodies + source_frame_index = scene.articulations["robot"].find_bodies("LF_THIGH")[0][0] + feet_indices, feet_names = scene.articulations["robot"].find_bodies(["LF_FOOT", "RF_FOOT"]) + # Check names are parsed the same order + user_feet_names = [f"{name}_USER" for name in feet_names] + self.assertListEqual(scene.sensors["frame_transformer"].data.target_frame_names, user_feet_names) + + # default joint targets + default_actions = scene.articulations["robot"].data.default_joint_pos.clone() + # Define simulation stepping + sim_dt = self.sim.get_physics_dt() + # Simulate physics + for count in range(100): + # # reset + if count % 25 == 0: + # reset root state + root_state = scene.articulations["robot"].data.default_root_state.clone() + root_state[:, :3] += scene.env_origins + joint_pos = scene.articulations["robot"].data.default_joint_pos + joint_vel = scene.articulations["robot"].data.default_joint_vel + # -- set root state + # -- robot + scene.articulations["robot"].write_root_state_to_sim(root_state) + scene.articulations["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + # reset buffers + scene.reset() + + # set joint targets + robot_actions = default_actions + 0.5 * torch.randn_like(default_actions) + scene.articulations["robot"].set_joint_position_target(robot_actions) + # write data to sim + scene.write_data_to_sim() + # perform step + self.sim.step() + # read data from sim + scene.update(sim_dt) + + # check absolute frame transforms in world frame + # -- ground-truth + source_pose_w_gt 
= scene.articulations["robot"].data.body_state_w[:, source_frame_index, :7] + feet_pos_w_gt = scene.articulations["robot"].data.body_pos_w[:, feet_indices] + feet_quat_w_gt = scene.articulations["robot"].data.body_quat_w[:, feet_indices] + # -- frame transformer + source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w + source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w + feet_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w + feet_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w + # check if they are same + torch.testing.assert_close(source_pose_w_gt[:, :3], source_pos_w_tf, rtol=1e-3, atol=1e-3) + torch.testing.assert_close(source_pose_w_gt[:, 3:], source_quat_w_tf, rtol=1e-3, atol=1e-3) + torch.testing.assert_close(feet_pos_w_gt, feet_pos_w_tf, rtol=1e-3, atol=1e-3) + torch.testing.assert_close(feet_quat_w_gt, feet_quat_w_tf, rtol=1e-3, atol=1e-3) + + # check if relative transforms are same + feet_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source + feet_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source + for index in range(len(feet_indices)): + # ground-truth + foot_pos_b, foot_quat_b = math_utils.subtract_frame_transforms( + source_pose_w_gt[:, :3], source_pose_w_gt[:, 3:], feet_pos_w_tf[:, index], feet_quat_w_tf[:, index] + ) + # check if they are same + torch.testing.assert_close(feet_pos_source_tf[:, index], foot_pos_b, rtol=1e-3, atol=1e-3) + torch.testing.assert_close(feet_quat_source_tf[:, index], foot_quat_b, rtol=1e-3, atol=1e-3) + + +if __name__ == "__main__": + try: + unittest.main() + except Exception as err: + carb.log_error(err) + carb.log_error(traceback.format_exc()) + raise + finally: + # close sim app + simulation_app.close() diff --git a/source/standalone/environments/state_machine/lift_cube_sm.py b/source/standalone/environments/state_machine/lift_cube_sm.py index bbb9bcaf52..3e566c549f 100644 --- a/source/standalone/environments/state_machine/lift_cube_sm.py +++ b/source/standalone/environments/state_machine/lift_cube_sm.py @@ -256,7 +256,7 @@ def main(): # -- end-effector frame ee_frame_sensor = env.unwrapped.scene["ee_frame"] tcp_rest_position = ee_frame_sensor.data.target_pos_w[..., 0, :].clone() - env.unwrapped.scene.env_origins - tcp_rest_orientation = ee_frame_sensor.data.target_rot_w[..., 0, :].clone() + tcp_rest_orientation = ee_frame_sensor.data.target_quat_w[..., 0, :].clone() # -- object frame object_data: RigidObjectData = env.unwrapped.scene["object"].data object_position = object_data.root_pos_w - env.unwrapped.scene.env_origins diff --git a/source/standalone/tutorials/04_sensors/run_frame_transformer.py b/source/standalone/tutorials/04_sensors/run_frame_transformer.py index 81eb9b7631..cf2b442455 100644 --- a/source/standalone/tutorials/04_sensors/run_frame_transformer.py +++ b/source/standalone/tutorials/04_sensors/run_frame_transformer.py @@ -38,6 +38,8 @@ import math import torch +import omni.isaac.debug_draw._debug_draw as omni_debug_draw + import omni.isaac.orbit.sim as sim_utils import omni.isaac.orbit.utils.math as math_utils from omni.isaac.orbit.assets import Articulation @@ -65,7 +67,7 @@ def define_sensor() -> FrameTransformer: FrameTransformerCfg.FrameCfg(prim_path="/World/Robot/.*"), FrameTransformerCfg.FrameCfg( prim_path="/World/Robot/LF_SHANK", - name="LF_FOOT", + name="LF_FOOT_USER", offset=OffsetCfg(pos=tuple(pos_offset.tolist()), rot=tuple(rot_offset[0].tolist())), ), ], @@ -113,8 +115,11 @@ def run_simulator(sim: 
sim_utils.SimulationContext, scene_entities: dict): cfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameVisualizerFromScript") cfg.markers["frame"].scale = (0.1, 0.1, 0.1) transform_visualizer = VisualizationMarkers(cfg) + # debug drawing for lines connecting the frame + draw_interface = omni_debug_draw.acquire_debug_draw_interface() else: transform_visualizer = None + draw_interface = None frame_index = 0 # Simulate physics @@ -143,9 +148,20 @@ def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict): frame_index = frame_index % len(frame_names) # visualize frame - pos = frame_transformer.data.target_pos_w[:, frame_index] - rot = frame_transformer.data.target_rot_w[:, frame_index] - transform_visualizer.visualize(pos, rot) + source_pos = frame_transformer.data.source_pos_w + source_quat = frame_transformer.data.source_quat_w + target_pos = frame_transformer.data.target_pos_w[:, frame_index] + target_quat = frame_transformer.data.target_quat_w[:, frame_index] + # draw the frames + transform_visualizer.visualize( + torch.cat([source_pos, target_pos], dim=0), torch.cat([source_quat, target_quat], dim=0) + ) + # draw the line connecting the frames + draw_interface.clear_lines() + # plain color for lines + lines_colors = [[1.0, 1.0, 0.0, 1.0]] * source_pos.shape[0] + line_thicknesses = [5.0] * source_pos.shape[0] + draw_interface.draw_lines(source_pos.tolist(), target_pos.tolist(), lines_colors, line_thicknesses) def main(): diff --git a/source/standalone/tutorials/04_sensors/run_usd_camera.py b/source/standalone/tutorials/04_sensors/run_usd_camera.py index 223ee3dec2..30167b95c3 100644 --- a/source/standalone/tutorials/04_sensors/run_usd_camera.py +++ b/source/standalone/tutorials/04_sensors/run_usd_camera.py @@ -59,9 +59,6 @@ from omni.isaac.orbit.utils import convert_dict_to_backend from omni.isaac.orbit.utils.math import project_points, transform_points, unproject_depth -# Acquire draw interface -draw_interface = omni_debug_draw.acquire_debug_draw_interface() - def define_sensor() -> Camera: """Defines the camera sensor to add to the scene.""" @@ -138,6 +135,9 @@ def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict): output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output", "camera") rep_writer = rep.BasicWriter(output_dir=output_dir, frame_padding=3) + # Acquire draw interface + draw_interface = omni_debug_draw.acquire_debug_draw_interface() + # Set pose: There are two ways to set the pose of the camera. # -- Option-1: Set pose using view eyes = torch.tensor([[2.5, 2.5, 2.5], [-2.5, -2.5, 2.5]], device=sim.device)
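For downstream users, the renamed fields in `FrameTransformerData` are the main migration point; the old names remain available as deprecated alias properties that emit a `DeprecationWarning`. A minimal usage sketch, assuming a `FrameTransformer` sensor was registered in the scene under the hypothetical key `"ee_frame"`:

```python
from omni.isaac.orbit.scene import InteractiveScene


def read_ee_frame(scene: InteractiveScene):
    """Reads frame transformer data using the renamed quaternion fields.

    Assumes a FrameTransformer sensor was added to the scene under the
    hypothetical key "ee_frame".
    """
    data = scene.sensors["ee_frame"].data
    source_pos_w = data.source_pos_w                # (N, 3)
    source_quat_w = data.source_quat_w              # (N, 4), (w, x, y, z); was `source_rot_w`
    target_quat_w = data.target_quat_w              # (N, M, 4); was `target_rot_w`
    target_quat_source = data.target_quat_source    # (N, M, 4); was `target_rot_source`
    # The old names still resolve (e.g. data.target_rot_w returns data.target_quat_w)
    # but raise a DeprecationWarning.
    return source_pos_w, source_quat_w, target_quat_w, target_quat_source
```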