From cf737f724814e138b3f4e5f3b3df7c8a2779981d Mon Sep 17 00:00:00 2001 From: jsmith-bdai <142246516+jsmith-bdai@users.noreply.github.com> Date: Tue, 31 Oct 2023 08:28:07 -0400 Subject: [PATCH] Adds in frame transformer and demo script (#183) # Description Adds in a new `Sensor` that performs frame transforms. No tests as part of this initial PR as this feature is needed by developers. Tests will be added in with the improvements: https://github.com/isaac-orbit/orbit/issues/202. Fixes #133 ## Type of change - New feature (non-breaking change which adds functionality) ## Screenshots ![image](https://github.com/isaac-orbit/orbit/assets/142246516/d4e2ebde-377f-4e22-9c14-8ec19b7b7498) ## Checklist - [x] I have run the [`pre-commit` checks](https://pre-commit.com/) with `./orbit.sh --format` - [x] I have made corresponding changes to the documentation - [x] My changes generate no new warnings - [ ] I have added tests that prove my fix is effective or that my feature works - [x] I have updated the changelog and the corresponding version in the extension's `config/extension.toml` file --------- Signed-off-by: jsmith-bdai <142246516+jsmith-bdai@users.noreply.github.com> --- .../omni.isaac.orbit/config/extension.toml | 2 +- .../omni.isaac.orbit/docs/CHANGELOG.rst | 10 + .../isaac/orbit/scene/interactive_scene.py | 10 +- .../omni/isaac/orbit/sensors/__init__.py | 15 +- .../sensors/frame_transformer/__init__.py | 16 + .../frame_transformer/frame_transformer.py | 352 ++++++++++++++++++ .../frame_transformer_cfg.py | 63 ++++ .../frame_transformer_data.py | 53 +++ .../omni/isaac/orbit/utils/math.py | 22 ++ .../test/sensors/check_frame_transformer.py | 254 +++++++++++++ .../omni.isaac.orbit/test/utils/test_math.py | 52 +++ 11 files changed, 846 insertions(+), 3 deletions(-) create mode 100644 source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/__init__.py create mode 100644 source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer.py create mode 100644 source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_cfg.py create mode 100644 source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_data.py create mode 100644 source/extensions/omni.isaac.orbit/test/sensors/check_frame_transformer.py create mode 100644 source/extensions/omni.isaac.orbit/test/utils/test_math.py diff --git a/source/extensions/omni.isaac.orbit/config/extension.toml b/source/extensions/omni.isaac.orbit/config/extension.toml index 8e1de759b5..553b501cb1 100644 --- a/source/extensions/omni.isaac.orbit/config/extension.toml +++ b/source/extensions/omni.isaac.orbit/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.9.25" +version = "0.9.26" # Description title = "ORBIT framework for Robot Learning" diff --git a/source/extensions/omni.isaac.orbit/docs/CHANGELOG.rst b/source/extensions/omni.isaac.orbit/docs/CHANGELOG.rst index 3f75edfa1d..84ba49f0bb 100644 --- a/source/extensions/omni.isaac.orbit/docs/CHANGELOG.rst +++ b/source/extensions/omni.isaac.orbit/docs/CHANGELOG.rst @@ -1,6 +1,16 @@ Changelog --------- +0.9.26 (2023-10-31) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added the sensor implementation for :class:`omni.isaac.orbit.sensors.FrameTransformer` class. Currently, + it handles obtaining the transformation between two frames in the same articulation. 
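A minimal configuration sketch of the new sensor, for context (the robot prim paths, the `wrist_link` body, and the `end_effector` frame name below are illustrative placeholders; only `FrameTransformerCfg`, `FrameCfg`, and `OffsetCfg` come from this patch):

```python
from omni.isaac.orbit.sensors import FrameTransformerCfg, OffsetCfg

# Track a hypothetical "wrist_link" body relative to the robot base; this config is
# added to an InteractiveSceneCfg like any other sensor config.
ee_frame = FrameTransformerCfg(
    # source frame: a rigid body of the articulation
    prim_path="{ENV_REGEX_NS}/Robot/base",
    target_frames=[
        FrameTransformerCfg.FrameCfg(
            name="end_effector",
            # target frame: another rigid body in the same articulation
            prim_path="{ENV_REGEX_NS}/Robot/wrist_link",
            # optional fixed offset from the body to the desired frame
            offset=OffsetCfg(pos=(0.0, 0.0, 0.1)),
        ),
    ],
    debug_vis=True,
)
```

The resulting sensor reports the pose of `end_effector` relative to `base` (and in the world frame) in its data buffer.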
+ + 0.9.25 (2023-10-27) ~~~~~~~~~~~~~~~~~~~ diff --git a/source/extensions/omni.isaac.orbit/omni/isaac/orbit/scene/interactive_scene.py b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/scene/interactive_scene.py index ccbfacc1ef..d47787927a 100644 --- a/source/extensions/omni.isaac.orbit/omni/isaac/orbit/scene/interactive_scene.py +++ b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/scene/interactive_scene.py @@ -19,7 +19,7 @@ from pxr import PhysxSchema from omni.isaac.orbit.assets import Articulation, ArticulationCfg, AssetBaseCfg, RigidObject, RigidObjectCfg -from omni.isaac.orbit.sensors import SensorBase, SensorBaseCfg +from omni.isaac.orbit.sensors import FrameTransformerCfg, SensorBase, SensorBaseCfg from omni.isaac.orbit.terrains import TerrainImporter, TerrainImporterCfg from .interactive_scene_cfg import InteractiveSceneCfg @@ -342,6 +342,14 @@ def _add_entities_from_cfg(self): elif isinstance(asset_cfg, RigidObjectCfg): self.rigid_objects[asset_name] = asset_cfg.class_type(asset_cfg) elif isinstance(asset_cfg, SensorBaseCfg): + # Update target frame path(s)' regex name space for FrameTransformer + if isinstance(asset_cfg, FrameTransformerCfg): + updated_target_frames = [] + for target_frame in asset_cfg.target_frames: + target_frame.prim_path = target_frame.prim_path.format(ENV_REGEX_NS=self.env_regex_ns) + updated_target_frames.append(target_frame) + asset_cfg.target_frames = updated_target_frames + self.sensors[asset_name] = asset_cfg.class_type(asset_cfg) elif isinstance(asset_cfg, AssetBaseCfg): # manually spawn asset diff --git a/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/__init__.py b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/__init__.py index 8740359bac..853b2445a4 100644 --- a/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/__init__.py +++ b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/__init__.py @@ -5,16 +5,29 @@ """ -This subpackage contains the sensor classes that are compatible with the Isaac Sim. We include both +This subpackage contains the sensor classes that are compatible with Isaac Sim. We include both USD-based and custom sensors. The USD-based sensors are the ones that are available in Omniverse and require creating a USD prim for them. Custom sensors, on the other hand, are the ones that are implemented in Python and do not require creating a USD prim for them. 
+ +A prim path (or expression) is still set for each sensor based on the following schema: + ++-------------------+--------------------------+---------------------------------------------------------------+ +| Sensor Type | Example Prim Path | Pre-check | ++===================+==========================+===============================================================+ +| Camera | /World/robot/base/camera | Leaf is available, and it will spawn a USD camera | +| Contact Sensor | /World/robot/feet_* | Leaf is available and checks if the schema exists | +| Ray Casters | /World/robot/base | Leaf exists and is a physics body (Articulation / Rigid Body) | +| Frame Transformer | /World/robot/base | Leaf exists and is a physics body (Articulation / Rigid Body) | ++-------------------+--------------------------+---------------------------------------------------------------+ + """ from __future__ import annotations from .camera import * # noqa: F401, F403 from .contact_sensor import * # noqa: F401, F403 +from .frame_transformer import * # noqa: F401 from .ray_caster import * # noqa: F401, F403 from .sensor_base import SensorBase # noqa: F401 from .sensor_base_cfg import SensorBaseCfg # noqa: F401 diff --git a/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/__init__.py b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/__init__.py new file mode 100644 index 0000000000..720486c87a --- /dev/null +++ b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES, ETH Zurich, and University of Toronto +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +Frame transform sensor for calculating frame transform of articulations. +""" + +from __future__ import annotations + +from .frame_transformer import FrameTransformer +from .frame_transformer_cfg import FrameTransformerCfg, OffsetCfg +from .frame_transformer_data import FrameTransformerData + +__all__ = ["FrameTransformer", "FrameTransformerCfg", "FrameTransformerData", "OffsetCfg"] diff --git a/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer.py b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer.py new file mode 100644 index 0000000000..5968440b8e --- /dev/null +++ b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer.py @@ -0,0 +1,352 @@ +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES, ETH Zurich, and University of Toronto +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + + +from __future__ import annotations + +import torch +from typing import TYPE_CHECKING, Sequence + +import carb +import omni.isaac.core.utils.prims as prim_utils +from omni.isaac.core.prims import RigidPrimView +from pxr import UsdPhysics + +from omni.isaac.orbit.markers import VisualizationMarkers +from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG +from omni.isaac.orbit.utils.math import ( + combine_frame_transforms, + convert_quat, + is_identity_pose, + subtract_frame_transforms, +) + +from ..sensor_base import SensorBase +from .frame_transformer_data import FrameTransformerData + +if TYPE_CHECKING: + from .frame_transformer_cfg import FrameTransformerCfg + + +class FrameTransformer(SensorBase): + """A sensor for reporting frame transforms. 
+
+    This class provides an interface for reporting the transform of one or more frames (target frames)
+    with respect to another frame (source frame). The source frame is specified by the user as a prim path
+    (:attr:`FrameTransformerCfg.prim_path`) and the target frames are specified by the user as a list of
+    prim paths (:attr:`FrameTransformerCfg.target_frames`).
+
+    The source frame and target frames are assumed to be rigid bodies. The transform of the target frames
+    with respect to the source frame is computed by first extracting the transform of the source frame
+    and target frames from the physics engine and then computing the relative transform between the two.
+
+    Additionally, the user can specify an offset for the source frame and each target frame. This is useful
+    for specifying the transform of the desired frame with respect to the body's center of mass, for instance.
+
+    A common example of using this sensor is to track the position and orientation of the end effector of a
+    robotic manipulator. In this case, the source frame would be the body corresponding to the base frame of the
+    manipulator, and the target frame would be the body corresponding to the end effector. Since the end-effector is
+    typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the
+    manipulator.
+
+    Note:
+        Currently, this implementation only handles frames within an articulation. This is because the frame
+        regex expressions are resolved based on their parent prim path. This can be extended to handle
+        frames outside of an articulation by using the frame prim path instead. However, this would require
+        additional checks to ensure that the user-specified frames are valid.
+
+    """
+
+    cfg: FrameTransformerCfg
+    """The configuration parameters."""
+
+    def __init__(self, cfg: FrameTransformerCfg):
+        """Initializes the frame transformer object.
+
+        Args:
+            cfg: The configuration parameters.
+        """
+        # initialize base class
+        super().__init__(cfg)
+        # Create empty variables for storing output data
+        self._data: FrameTransformerData = FrameTransformerData()
+
+        # visualization markers
+        self.transform_visualizer = None
+
+    def __str__(self) -> str:
+        """Returns: A string containing information about the instance."""
+        return (
+            f"FrameTransformer @ '{self.cfg.prim_path}': \n"
+            f"\ttracked body frames: {self._tracked_body_names} \n"
+            f"\tnumber of envs: {self._num_envs}\n"
+            f"\tsource frame: {self._tracked_body_names[0]}\n"
+            f"\ttarget frames: {self._target_frame_names} count: {len(self._target_frame_names)}\n"
+        )
+
+    """
+    Properties
+    """
+
+    @property
+    def data(self) -> FrameTransformerData:
+        # update sensors if needed
+        self._update_outdated_buffers()
+        # return the data
+        return self._data
+
+    """
+    Operations
+    """
+
+    def set_debug_vis(self, debug_vis: bool):
+        super().set_debug_vis(debug_vis)
+        if self.transform_visualizer is not None:
+            self.transform_visualizer.set_visibility(debug_vis)
+
+    def reset(self, env_ids: Sequence[int] | None = None):
+        # reset the timers and counters
+        super().reset(env_ids)
+        # resolve None
+        if env_ids is None:
+            env_ids = ...
+        # Set all reset sensors to not outdated since their value won't be updated till next sim step.
+        self._is_outdated[env_ids] = False
+
+    """
+    Implementation.
+ """ + + def _initialize_impl(self): + super()._initialize_impl() + + # resolve source frame offset + source_frame_offset_pos = torch.tensor(self.cfg.source_frame_offset.pos, device=self.device) + source_frame_offset_rot = torch.tensor(self.cfg.source_frame_offset.rot, device=self.device) + # Only need to perform offsetting of source frame if the position offsets is non-zero and rotation offset is + # not the identity quaternion for efficiency in _update_buffer_impl + self._apply_source_frame_offset = True + # Handle source frame offsets + if is_identity_pose(source_frame_offset_pos, source_frame_offset_rot): + carb.log_verbose(f"Not applying offset for source frame as it is identity: {self.cfg.prim_path}") + self._apply_source_frame_offset = False + else: + carb.log_verbose(f"Applying offset for source frame as it is not identity: {self.cfg.prim_path}") + # Store offsets as tensors (duplicating each env's offsets for ease of multiplication later) + self._source_frame_offset_pos = source_frame_offset_pos.unsqueeze(0).repeat(self._num_envs, 1) + self._source_frame_offset_rot = source_frame_offset_rot.unsqueeze(0).repeat(self._num_envs, 1) + + # The offsets associated with each target frame + target_offsets = {} + + # Keep track of mapping from the rigid body name to the desired frame, as there may be multiple frames + # based upon the same body name and we don't want to create unnecessary views + body_names_to_frames = {} + + # The frames whose offsets are not identity + non_identity_offset_frames = [] + + # Only need to perform offsetting of target frame if any of the position offsets are non-zero or any of the + # rotation offsets are not the identity quaternion for efficiency in _update_buffer_impl + self._apply_target_frame_offset = False + + # Collect all target frames, their associated body prim paths and their offsets so that we can extract + # the prim, check that it has the appropriate rigid body API in a single loop. + # First element is None because user can't specify source frame name + frames = [None] + [target_frame.name for target_frame in self.cfg.target_frames] + frame_prim_paths = [self.cfg.prim_path] + [target_frame.prim_path for target_frame in self.cfg.target_frames] + # First element is None because source frame offset is handled above + frame_offsets = [None] + [target_frame.offset for target_frame in self.cfg.target_frames] + for frame, prim_path, offset in zip(frames, frame_prim_paths, frame_offsets): + # Find correct prim + for matching_prim_path in prim_utils.find_matching_prim_paths(prim_path): + prim = prim_utils.get_prim_at_path(matching_prim_path) + # check if it is a rigid prim + if not prim.HasAPI(UsdPhysics.RigidBodyAPI): + raise ValueError( + f"While resolving expression '{prim_path}' found a prim '{matching_prim_path}' which is not a" + " rigid body. The class only supports transformations between rigid bodies." 
+ ) + + body_name = matching_prim_path.rsplit("/", 1)[-1] + + # Use body_name if frame isn't specified by user + if frame is None: + frame_name = body_name + else: + frame_name = frame + + if body_name in body_names_to_frames: + body_names_to_frames[body_name].add(frame_name) + else: + body_names_to_frames[body_name] = {frame_name} + + if offset is not None: + offset_pos = torch.tensor(offset.pos, device=self.device) + offset_rot = torch.tensor(offset.rot, device=self.device) + # Check if we need to apply offsets + if not is_identity_pose(offset_pos, offset_rot): + non_identity_offset_frames.append(frame_name) + self._apply_target_frame_offset = True + + target_offsets[frame_name] = {"pos": offset_pos, "rot": offset_rot} + + if not self._apply_target_frame_offset: + carb.log_info( + f"Not applying offset from {self.cfg.prim_path} to target frames as all are identity: {frames}" + ) + else: + carb.log_info( + f"Applying offset from {self.cfg.prim_path} as the following frames are non-identity:" + f" {non_identity_offset_frames}" + ) + + # The names of bodies that RigidPrimView will be tracking to later extract transforms from + self._tracked_body_names = list(body_names_to_frames.keys()) + num_tracked_bodies = len(self._tracked_body_names) + + # The number of target body frames being tracked by RigidPrimView. Subtract one to remove source frame from + # count of frames + self._num_target_body_frames = num_tracked_bodies - 1 + + # Determine indices into all tracked body frames for both source and target frames + all_idxs = torch.arange(self._num_envs * num_tracked_bodies) + self._source_frame_idxs = torch.arange(self._num_envs) * num_tracked_bodies + self._target_frame_idxs = all_idxs[~torch.isin(all_idxs, self._source_frame_idxs)] + + # Construct regex expression for the body names + body_names_regex = r"(" + "|".join(self._tracked_body_names) + r")" + body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}" + + # Create a prim view for all frames and initialize it + # order of transforms coming out of view will be source frame followed by target frame(s) + self._frame_view = RigidPrimView(prim_paths_expr=body_names_regex, reset_xform_properties=False) + self._frame_view.initialize() + + # Determine the order in which regex evaluated body names so we can later index into frame transforms + # by frame name correctly + all_prim_paths = self._frame_view.prim_paths + + # Only need first env as the names and their orderring are the same across environments + first_env_prim_paths = all_prim_paths[0 : self._num_target_body_frames + 1] + first_env_body_names = [first_env_prim_path.split("/")[-1] for first_env_prim_path in first_env_prim_paths] + + target_frame_body_names = first_env_body_names[1:] + + # The position and rotation components of target frame offsets + target_frame_offset_pos = [] + target_frame_offset_rot = [] + + # The name of each of the target frame(s) - either user specified or defaulted to the body name + self._target_frame_names = [] + + # Stores the indices of bodies that need to be duplicated. For instance, if body "LF_SHANK" is needed + # for 2 frames, this list enables us to duplicate the body to both frames when doing the calculations + # when updating sensor in _update_buffers_impl + duplicate_frame_indices = [] + + # Go through each body name and determine the number of duplicates we need for that frame + # and extract the offsets. 
This is all done to handle the case where multiple frames
+        # reference the same body, but have different names and/or offsets
+        for i, body_name in enumerate(target_frame_body_names):
+            for frame in body_names_to_frames[body_name]:
+                target_frame_offset_pos.append(target_offsets[frame]["pos"])
+                target_frame_offset_rot.append(target_offsets[frame]["rot"])
+                self._target_frame_names.append(frame)
+                duplicate_frame_indices.append(i)
+
+        duplicate_frame_indices = torch.tensor(duplicate_frame_indices, device=self.device)
+
+        # To handle multiple environments, need to expand so [0, 1, 1, 2] with 2 environments becomes
+        # [0, 1, 1, 2, 3, 4, 4, 5]. Again, this is an optimization to make _update_buffers_impl more efficient
+        self._duplicate_frame_indices = torch.cat(
+            [duplicate_frame_indices + self._num_target_body_frames * env_num for env_num in range(self._num_envs)]
+        )
+
+        # Stack up all the frame offsets
+        self._target_frame_offset_pos = torch.stack(target_frame_offset_pos).repeat(self._num_envs, 1)
+        self._target_frame_offset_rot = torch.stack(target_frame_offset_rot).repeat(self._num_envs, 1)
+
+        # fill the data buffer
+        self._data.target_frame_names = self._target_frame_names
+        self._data.source_pos_w = torch.zeros(self._num_envs, 3, device=self._device)
+        self._data.source_rot_w = torch.zeros(self._num_envs, 4, device=self._device)
+        self._data.target_pos_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 3, device=self._device)
+        self._data.target_rot_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 4, device=self._device)
+        self._data.target_pos_source = torch.zeros(self._num_envs, len(duplicate_frame_indices), 3, device=self._device)
+        self._data.target_rot_source = torch.zeros(self._num_envs, len(duplicate_frame_indices), 4, device=self._device)
+
+    def _update_buffers_impl(self, env_ids: Sequence[int]):
+        """Fills the buffers of the sensor data."""
+        # default to all sensors
+        if len(env_ids) == self._num_envs:
+            env_ids = ...
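+        # The update below proceeds in three steps:
+        #   1. read the world poses of all tracked bodies from the physics view (positions and xyzw quaternions),
+        #   2. apply the configured source/target offsets with combine_frame_transforms (skipped when identity),
+        #   3. express each target pose in the source frame with subtract_frame_transforms, i.e. for world poses
+        #      (p_s, q_s) and (p_t, q_t) the reported pose is p = q_s^-1 * (p_t - p_s) and q = q_s^-1 * q_t.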
+ + # Extract transforms from view - shape is: + # (the total number of source and target body frames being tracked * self._num_envs, 7) + transforms = self._frame_view._physics_view.get_transforms() + + source_frames = transforms[self._source_frame_idxs] + target_frames = transforms[self._target_frame_idxs] + + # Convert quaternions as Isaac uses xyzw form + source_frames[:, 3:] = convert_quat(source_frames[:, 3:], to="wxyz") + target_frames[:, 3:] = convert_quat(target_frames[:, 3:], to="wxyz") + + # Only apply offset if the offsets will result in a coordinate frame transform + if self._apply_source_frame_offset: + # Apply offsets for source frame + source_pos_w, source_rot_w = combine_frame_transforms( + source_frames[:, :3], + source_frames[:, 3:], + self._source_frame_offset_pos, + self._source_frame_offset_rot, + ) + else: + source_pos_w = source_frames[:, :3] + source_rot_w = source_frames[:, 3:] + + duplicated_target_frame_pos_w = target_frames[self._duplicate_frame_indices, :3] + duplicated_target_frame_rot_w = target_frames[self._duplicate_frame_indices, 3:] + + # Only apply offset if the offsets will result in a coordinate frame transform + if self._apply_target_frame_offset: + # Apply offsets for target frame + target_pos_w, target_rot_w = combine_frame_transforms( + duplicated_target_frame_pos_w, + duplicated_target_frame_rot_w, + self._target_frame_offset_pos, + self._target_frame_offset_rot, + ) + else: + target_pos_w = duplicated_target_frame_pos_w + target_rot_w = duplicated_target_frame_rot_w + + total_num_frames = len(self._target_frame_names) + + target_pos_source, target_rot_source = subtract_frame_transforms( + source_pos_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 3), + source_rot_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 4), + target_pos_w, + target_rot_w, + ) + + # Update buffers + # NOTE: The frame names / orderring don't change so no need to update them after initialization + self._data.source_pos_w[:] = source_pos_w.view(-1, 3) + self._data.source_rot_w[:] = source_rot_w.view(-1, 4) + self._data.target_pos_w[:] = target_pos_w.view(-1, total_num_frames, 3) + self._data.target_rot_w[:] = target_rot_w.view(-1, total_num_frames, 4) + self._data.target_pos_source[:] = target_pos_source.view(-1, total_num_frames, 3) + self._data.target_rot_source[:] = target_rot_source.view(-1, total_num_frames, 4) + + def _debug_vis_impl(self): + # visualize the transform + if self.transform_visualizer is None: + cfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameTransformer") + cfg.markers["frame"].scale = (0.05, 0.05, 0.05) + self.transform_visualizer = VisualizationMarkers(cfg) + + self.transform_visualizer.visualize(self._data.target_pos_w.view(-1, 3), self._data.target_rot_w.view(-1, 4)) diff --git a/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_cfg.py b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_cfg.py new file mode 100644 index 0000000000..9e48f72267 --- /dev/null +++ b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_cfg.py @@ -0,0 +1,63 @@ +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES, ETH Zurich, and University of Toronto +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + + +from __future__ import annotations + +from dataclasses import MISSING + +from omni.isaac.orbit.utils import configclass + +from ..sensor_base_cfg import SensorBaseCfg +from .frame_transformer import FrameTransformer + + +@configclass +class OffsetCfg: + """The offset pose of one frame relative to another frame.""" + + pos: tuple[float, float, float] = (0.0, 0.0, 0.0) + """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" + rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) + """Quaternion rotation ``(w, x, y, z)`` w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" + + +@configclass +class FrameTransformerCfg(SensorBaseCfg): + """Configuration for the frame transformer sensor.""" + + @configclass + class FrameCfg: + """Information specific to a coordinate frame.""" + + prim_path: str = MISSING + """The prim path corresponding to the parent rigid body. + + This prim should be part of the same articulation as :attr:`FrameTransformerCfg.prim_path`. + """ + name: str | None = None + """User-defined name for the new coordinate frame. Defaults to None. + + If None, then the name is extracted from the leaf of the prim path. + """ + + offset: OffsetCfg = OffsetCfg() + """The pose offset from the parent prim frame.""" + + class_type: type = FrameTransformer + + prim_path: str = MISSING + """The prim path of the body to transform from (source frame).""" + + source_frame_offset: OffsetCfg = OffsetCfg() + """The pose offset from the source prim frame.""" + + target_frames: list[FrameCfg] = MISSING + """A list of the target frames. + + This allows a single FrameTransformer to handle multiple target prims. For example, in a quadruped, + we can use a single FrameTransformer to track each foot's position and orientation in the body + frame using four frame offsets. + """ diff --git a/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_data.py b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_data.py new file mode 100644 index 0000000000..e3b3b2fa55 --- /dev/null +++ b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/sensors/frame_transformer/frame_transformer_data.py @@ -0,0 +1,53 @@ +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES, ETH Zurich, and University of Toronto +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch +from dataclasses import dataclass + + +@dataclass +class FrameTransformerData: + """Data container for the frame transformer sensor.""" + + target_frame_names: list[str] = None + """Target frame names (this denotes the order that frame data will be ordered). + + The frame names are resolved from the :attr:`FrameTransformerCfg.FrameCfg.name` field. + This usually follows the order in which the frames are defined in the config. However, in the case of + regex matching, the order may be different. + """ + + target_pos_source: torch.Tensor = None + """Position of the target frame(s) relative to source frame. + + Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. + """ + target_rot_source: torch.Tensor = None + """Orientation of the target frame(s) relative to source frame quaternion ``(w, x, y, z)``. + + Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. 
+ """ + target_pos_w: torch.Tensor = None + """Position of the target frame(s) after offset (in world frame). + + Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. + """ + target_rot_w: torch.Tensor = None + """Orientation of the target frame(s) after offset (in world frame) quaternion ``(w, x, y, z)``. + + Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. + """ + source_pos_w: torch.Tensor = None + """Position of the source frame after offset (in world frame). + + Shape is (N, 3), where N is the number of environments. + """ + source_rot_w: torch.Tensor = None + """Orientation of the source frame after offset (in world frame) quaternion ``(w, x, y, z)``. + + Shape is (N, 4), where N is the number of environments. + """ diff --git a/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/math.py b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/math.py index 5475c31ac8..72e323bbd1 100644 --- a/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/math.py +++ b/source/extensions/omni.isaac.orbit/omni/isaac/orbit/utils/math.py @@ -53,6 +53,7 @@ "quat_rotate", "quat_rotate_inverse", # Transformations + "is_identity_pose", "combine_frame_transforms", "subtract_frame_transforms", "compute_pose_error", @@ -524,6 +525,27 @@ def axis_angle_from_quat(quat: torch.Tensor, eps: float = 1.0e-6) -> torch.Tenso """ +def is_identity_pose(pos: torch.tensor, rot: torch.tensor) -> bool: + """Checks if input poses are identity transforms. + + The function checks if the input position and orientation are close to zero and + identity respectively using L2-norm. It does NOT check the error in the orientation. + + Args: + pos: The cartesian position. Shape is (N, 3). + rot: The quaternion orientation in (w, x, y, z). Shape is (N, 4). + + Returns: + True if all the input poses result in identity transform. Otherwise, False. + """ + # create identity transformations + pos_identity = torch.zeros_like(pos) + rot_identity = torch.zeros_like(rot) + rot_identity[..., 0] = 1 + # compare input to identity + return torch.allclose(pos, pos_identity) and torch.allclose(rot, rot_identity) + + # @torch.jit.script def combine_frame_transforms( t01: torch.Tensor, q01: torch.Tensor, t12: torch.Tensor = None, q12: torch.Tensor = None diff --git a/source/extensions/omni.isaac.orbit/test/sensors/check_frame_transformer.py b/source/extensions/omni.isaac.orbit/test/sensors/check_frame_transformer.py new file mode 100644 index 0000000000..37bb0f7510 --- /dev/null +++ b/source/extensions/omni.isaac.orbit/test/sensors/check_frame_transformer.py @@ -0,0 +1,254 @@ +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES, ETH Zurich, and University of Toronto +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +This script checks the FrameTransformer sensor by visualizing the frames that it creates. +""" + +from __future__ import annotations + +"""Launch Isaac Sim Simulator first.""" + + +import argparse +import math +from scipy.spatial.transform import Rotation + +from omni.isaac.orbit.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser( + description="This script checks the FrameTransformer sensor by visualizing the frames that it creates." 
+) +parser.add_argument( + "--mode", + type=str, + choices=["feet", "all"], + default="all", + help="Dictates the the type of frames to use for FrameTransformer.", +) +parser.add_argument( + "--visualize", + action="store_true", + help=( + "Whether to enable FrameTransformer's debug_vis (True) or visualize each frame one at a" + " time and print to console (False)." + ), +) +parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.") +AppLauncher.add_app_launcher_args(parser) +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(headless=args_cli.headless) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import omni.isaac.orbit.sim as sim_utils +from omni.isaac.orbit.assets import AssetBaseCfg +from omni.isaac.orbit.assets.config.anymal import ANYMAL_C_CFG +from omni.isaac.orbit.markers import VisualizationMarkers +from omni.isaac.orbit.markers.config import FRAME_MARKER_CFG +from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg +from omni.isaac.orbit.sensors import FrameTransformerCfg, OffsetCfg +from omni.isaac.orbit.sim import SimulationContext +from omni.isaac.orbit.terrains import TerrainImporterCfg +from omni.isaac.orbit.utils import configclass +from omni.isaac.orbit.utils.timer import Timer + + +def quat_from_euler_rpy(roll, pitch, yaw, degrees=False): + """Converts Euler XYZ to Quaternion (w, x, y, z).""" + quat = Rotation.from_euler("xyz", (roll, pitch, yaw), degrees=degrees).as_quat() + return tuple(quat[[3, 0, 1, 2]].tolist()) + + +def euler_rpy_apply(rpy, xyz, degrees=False): + """Applies rotation from Euler XYZ on position vector.""" + rot = Rotation.from_euler("xyz", rpy, degrees=degrees) + return tuple(rot.apply(xyz).tolist()) + + +@configclass +class MySceneCfg(InteractiveSceneCfg): + """Example scene configuration.""" + + # terrain - flat terrain plane + terrain = TerrainImporterCfg( + prim_path="/World/ground", + terrain_type="plane", + ) + + # articulation - robot + robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + if args_cli.mode == "feet": + # Example where only feet position frames are created + frame_transformer = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg( + name="LF_FOOT", + prim_path="{ENV_REGEX_NS}/Robot/LF_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(0.08795, 0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, -math.pi / 2), + ), + ), + FrameTransformerCfg.FrameCfg( + name="RF_FOOT", + prim_path="{ENV_REGEX_NS}/Robot/RF_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(0.08795, -0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, math.pi / 2), + ), + ), + FrameTransformerCfg.FrameCfg( + name="LH_FOOT", + prim_path="{ENV_REGEX_NS}/Robot/LH_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(-0.08795, 0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, -math.pi / 2), + ), + ), + FrameTransformerCfg.FrameCfg( + name="RH_FOOT", + prim_path="{ENV_REGEX_NS}/Robot/RH_SHANK", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, math.pi / 2), xyz=(-0.08795, -0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, math.pi / 2), + ), + ), + ], + debug_vis=args_cli.visualize, + ) + + elif args_cli.mode == "all": + # Example using .* to get full body + LF_FOOT to ensure these work together + frame_transformer = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + 
target_frames=[ + FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/.*"), + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/LF_SHANK", + name="LF_FOOT", + offset=OffsetCfg( + pos=euler_rpy_apply(rpy=(0, 0, -math.pi / 2), xyz=(0.08795, 0.01305, -0.33797)), + rot=quat_from_euler_rpy(0, 0, -math.pi / 2), + ), + ), + ], + debug_vis=args_cli.visualize, + ) + + # extras - light + light = AssetBaseCfg( + prim_path="/World/light", + spawn=sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)), + init_state=AssetBaseCfg.InitialStateCfg(pos=(0.0, 0.0, 500.0)), + ) + + +def main(): + """Main function.""" + + # Load kit helper + sim = SimulationContext(sim_utils.SimulationCfg(dt=0.005)) + # Set main camera + sim.set_camera_view(eye=[5, 5, 5], target=[0.0, 0.0, 0.0]) + + # Spawn things into stage + with Timer("Setup scene"): + scene = InteractiveScene(MySceneCfg(num_envs=args_cli.num_envs, env_spacing=5.0, lazy_sensor_update=False)) + + # Play the simulator + with Timer("Time taken to play the simulator"): + sim.reset() + + # Now we are ready! + print("[INFO]: Setup complete...") + + # default joint targets + robot_actions = scene.articulations["robot"].data.default_joint_pos.clone() + # Define simulation stepping + sim_dt = sim.get_physics_dt() + sim_time = 0.0 + count = 0 + + # We only want one visualization at a time. This visualizer will be used + # to step through each frame so the user can verify that the correct frame + # is being visualized as the frame names are printing to console + if not args_cli.visualize: + cfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameTransformer") + cfg.markers["frame"].scale = (0.05, 0.05, 0.05) + transform_visualizer = VisualizationMarkers(cfg) + else: + transform_visualizer = None + + frame_index = 0 + # Simulate physics + while simulation_app.is_running(): + # If simulation is stopped, then exit. + if sim.is_stopped(): + break + # If simulation is paused, then skip. 
+ if not sim.is_playing(): + sim.step() + continue + # # reset + if count % 50 == 0: + # reset counters + sim_time = 0.0 + count = 0 + # reset root state + root_state = scene.articulations["robot"].data.default_root_state_w.clone() + root_state[:, :3] += scene.env_origins + joint_pos = scene.articulations["robot"].data.default_joint_pos + joint_vel = scene.articulations["robot"].data.default_joint_vel + # -- set root state + # -- robot + scene.articulations["robot"].write_root_state_to_sim(root_state) + scene.articulations["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + root_state[:, 1] += 1.0 + # reset buffers + scene.reset() + print(">>>>>>>> Reset!") + # perform this loop at policy control freq (50 Hz) + for _ in range(4): + # set joint targets + scene.articulations["robot"].set_joint_position_target(robot_actions) + # write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # read data from sim + scene.update(sim_dt) + + # Change the frame that we are visualizing to ensure that frame names + # are correctly associated with the frames + if not args_cli.visualize: + if count % 50 == 0: + frame_names = scene["frame_transformer"].data.target_frame_names + + frame_name = frame_names[frame_index] + print(f"Displaying {frame_index}: {frame_name}") + frame_index += 1 + frame_index = frame_index % len(frame_names) + # visualize frame + pos = scene["frame_transformer"].data.target_pos_w[:, frame_index] + rot = scene["frame_transformer"].data.target_rot_w[:, frame_index] + transform_visualizer.visualize(pos, rot) + + # update sim-time + sim_time += sim_dt * 4 + count += 1 + + +if __name__ == "__main__": + # Run the main function + main() + # Close the simulator + simulation_app.close() diff --git a/source/extensions/omni.isaac.orbit/test/utils/test_math.py b/source/extensions/omni.isaac.orbit/test/utils/test_math.py new file mode 100644 index 0000000000..0c49c93d52 --- /dev/null +++ b/source/extensions/omni.isaac.orbit/test/utils/test_math.py @@ -0,0 +1,52 @@ +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES, ETH Zurich, and University of Toronto +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch +import unittest + +"""Launch Isaac Sim Simulator first. + +This is only needed because of warp dependency. +""" + +from omni.isaac.orbit.app import AppLauncher + +# launch omniverse app in headless mode +app_launcher = AppLauncher(headless=True) + +import omni.isaac.orbit.utils.math as math_utils + + +class TestMathUtilities(unittest.TestCase): + """Test fixture for checking math utilities in Orbit.""" + + def test_is_identity_pose(self): + """Test is_identity_pose method.""" + identity_pos_one_row = torch.zeros(3) + identity_rot_one_row = torch.tensor((1.0, 0.0, 0.0, 0.0)) + + self.assertTrue(math_utils.is_identity_pose(identity_pos_one_row, identity_rot_one_row)) + + identity_pos_one_row[0] = 1.0 + identity_rot_one_row[1] = 1.0 + + self.assertFalse(math_utils.is_identity_pose(identity_pos_one_row, identity_rot_one_row)) + + identity_pos_multi_row = torch.zeros(3, 3) + identity_rot_multi_row = torch.zeros(3, 4) + identity_rot_multi_row[:, 0] = 1.0 + + self.assertTrue(math_utils.is_identity_pose(identity_pos_multi_row, identity_rot_multi_row)) + + identity_pos_multi_row[0, 0] = 1.0 + identity_rot_multi_row[0, 1] = 1.0 + + self.assertFalse(math_utils.is_identity_pose(identity_pos_multi_row, identity_rot_multi_row)) + + +if __name__ == "__main__": + unittest.main()
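As a companion to the check script above, a brief sketch of reading back the computed transforms (under the assumption that `scene` is the `InteractiveScene` built from `MySceneCfg` and that `scene.update(sim_dt)` has already been called at least once after `sim.reset()`):

```python
# Access the sensor data container (same access pattern as the check script).
ft_data = scene["frame_transformer"].data

# Poses of the target frames expressed in the source (base) frame:
# target_pos_source is (num_envs, num_frames, 3); target_rot_source is (num_envs, num_frames, 4) in (w, x, y, z).
lf_idx = ft_data.target_frame_names.index("LF_FOOT")
lf_pos_b = ft_data.target_pos_source[:, lf_idx]
lf_rot_b = ft_data.target_rot_source[:, lf_idx]
print(f"LF_FOOT pose in base frame (env 0): pos={lf_pos_b[0]}, quat={lf_rot_b[0]}")
```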