
Commit ee3f022

fan-ziqi and kellyguo11 authored
Adds clip range for JointAction (#1476)
# Description

This PR adds support for action clipping to all mdp/actions. Clip ranges can be specified in the `ActionTermCfg` as a dictionary mapping joint names to a tuple of lower and upper bounds.

## Type of change

- New feature (non-breaking change which adds functionality)
- This change requires a documentation update

## Checklist

- [x] I have run the [`pre-commit` checks](https://pre-commit.com/) with `./isaaclab.sh --format`
- [ ] I have made corresponding changes to the documentation
- [x] My changes generate no new warnings
- [ ] I have added tests that prove my fix is effective or that my feature works
- [x] I have updated the changelog and the corresponding version in the extension's `config/extension.toml` file
- [x] I have added my name to the `CONTRIBUTORS.md` or my name already exists there

---------

Signed-off-by: Kelly Guo <kellyg@nvidia.com>
Co-authored-by: Kelly Guo <kellyg@nvidia.com>
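For illustration, a clip range could be set on an action term configuration roughly as follows. This is a sketch, not part of the commit: the asset name, joint-name patterns, and bounds are made-up example values; only the `clip` field comes from this change.

```python
from omni.isaac.lab.envs import mdp

# Hypothetical action term config -- asset name, joint patterns, and bounds are example values.
joint_pos = mdp.JointPositionActionCfg(
    asset_name="robot",
    joint_names=[".*"],
    scale=0.5,
    # clip maps joint-name patterns to (lower, upper) bounds; unmatched joints stay unclipped
    clip={".*_hip_joint": (-1.0, 1.0), ".*_knee_joint": (-0.5, 1.5)},
)
```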
1 parent 8ddc483 commit ee3f022

File tree

8 files changed: +99 -2 lines changed


source/extensions/omni.isaac.lab/config/extension.toml

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 [package]
 
 # Note: Semantic Versioning is used: https://semver.org/
-version = "0.27.28"
+version = "0.27.29"
 
 # Description
 title = "Isaac Lab framework for Robot Learning"

source/extensions/omni.isaac.lab/docs/CHANGELOG.rst

Lines changed: 9 additions & 0 deletions
@@ -1,6 +1,15 @@
 Changelog
 ---------
 
+0.27.29 (2024-12-15)
+~~~~~~~~~~~~~~~~~~~~
+
+Added
+^^^^^
+
+* Added action clip to all :class:`omni.isaac.lab.envs.mdp.actions`.
+
+
 0.27.28 (2024-12-14)
 ~~~~~~~~~~~~~~~~~~~~

source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/binary_joint_actions.py

Lines changed: 17 additions & 1 deletion
@@ -40,9 +40,10 @@ class BinaryJointAction(ActionTerm):
 
     cfg: actions_cfg.BinaryJointActionCfg
     """The configuration of the action term."""
-
     _asset: Articulation
     """The articulation asset on which the action term is applied."""
+    _clip: torch.Tensor
+    """The clip applied to the input action."""
 
     def __init__(self, cfg: actions_cfg.BinaryJointActionCfg, env: ManagerBasedEnv) -> None:
         # initialize the action term
@@ -83,6 +84,17 @@ def __init__(self, cfg: actions_cfg.BinaryJointActionCfg, env: ManagerBasedEnv)
         )
         self._close_command[index_list] = torch.tensor(value_list, device=self.device)
 
+        # parse clip
+        if self.cfg.clip is not None:
+            if isinstance(cfg.clip, dict):
+                self._clip = torch.tensor([[-float("inf"), float("inf")]], device=self.device).repeat(
+                    self.num_envs, self.action_dim, 1
+                )
+                index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.clip, self._joint_names)
+                self._clip[:, index_list] = torch.tensor(value_list, device=self.device)
+            else:
+                raise ValueError(f"Unsupported clip type: {type(cfg.clip)}. Supported types are dict.")
+
     """
     Properties.
     """
@@ -115,6 +127,10 @@ def process_actions(self, actions: torch.Tensor):
         binary_mask = actions < 0
         # compute the command
         self._processed_actions = torch.where(binary_mask, self._close_command, self._open_command)
+        if self.cfg.clip is not None:
+            self._processed_actions = torch.clamp(
+                self._processed_actions, min=self._clip[:, :, 0], max=self._clip[:, :, 1]
+            )
 
     def reset(self, env_ids: Sequence[int] | None = None) -> None:
         self._raw_actions[env_ids] = 0.0

source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/joint_actions.py

Lines changed: 17 additions & 0 deletions
@@ -50,6 +50,8 @@ class JointAction(ActionTerm):
     """The scaling factor applied to the input action."""
     _offset: torch.Tensor | float
     """The offset applied to the input action."""
+    _clip: torch.Tensor
+    """The clip applied to the input action."""
 
     def __init__(self, cfg: actions_cfg.JointActionCfg, env: ManagerBasedEnv) -> None:
         # initialize the action term
@@ -94,6 +96,16 @@ def __init__(self, cfg: actions_cfg.JointActionCfg, env: ManagerBasedEnv) -> Non
             self._offset[:, index_list] = torch.tensor(value_list, device=self.device)
         else:
             raise ValueError(f"Unsupported offset type: {type(cfg.offset)}. Supported types are float and dict.")
+        # parse clip
+        if self.cfg.clip is not None:
+            if isinstance(cfg.clip, dict):
+                self._clip = torch.tensor([[-float("inf"), float("inf")]], device=self.device).repeat(
+                    self.num_envs, self.action_dim, 1
+                )
+                index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.clip, self._joint_names)
+                self._clip[:, index_list] = torch.tensor(value_list, device=self.device)
+            else:
+                raise ValueError(f"Unsupported clip type: {type(cfg.clip)}. Supported types are dict.")
 
     """
     Properties.
@@ -120,6 +132,11 @@ def process_actions(self, actions: torch.Tensor):
         self._raw_actions[:] = actions
         # apply the affine transformations
         self._processed_actions = self._raw_actions * self._scale + self._offset
+        # clip actions
+        if self.cfg.clip is not None:
+            self._processed_actions = torch.clamp(
+                self._processed_actions, min=self._clip[:, :, 0], max=self._clip[:, :, 1]
+            )
 
     def reset(self, env_ids: Sequence[int] | None = None) -> None:
         self._raw_actions[env_ids] = 0.0
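As the diff above shows, the dictionary of bounds is expanded into a `(num_envs, action_dim, 2)` tensor that defaults to `(-inf, inf)` and is then applied with `torch.clamp`. A minimal standalone sketch of that behavior in plain PyTorch (made-up joint names and values, not code from this commit):

```python
import torch

num_envs, joint_names = 4, ["joint_a", "joint_b", "joint_c"]
action_dim = len(joint_names)
# hypothetical clip config: joint_b is left unbounded
clip_cfg = {"joint_a": (-1.0, 1.0), "joint_c": (0.0, 0.5)}

# default bounds (-inf, inf) for every env and joint -> shape (num_envs, action_dim, 2)
clip = torch.tensor([[-float("inf"), float("inf")]]).repeat(num_envs, action_dim, 1)
for name, bounds in clip_cfg.items():
    clip[:, joint_names.index(name)] = torch.tensor(bounds)

actions = torch.tensor([[2.0, -3.0, 0.7]]).repeat(num_envs, 1)
clipped = torch.clamp(actions, min=clip[:, :, 0], max=clip[:, :, 1])
print(clipped[0])  # tensor([ 1.0000, -3.0000,  0.5000])
```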

source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/joint_actions_to_limits.py

Lines changed: 16 additions & 0 deletions
@@ -44,6 +44,8 @@ class JointPositionToLimitsAction(ActionTerm):
     """The articulation asset on which the action term is applied."""
     _scale: torch.Tensor | float
     """The scaling factor applied to the input action."""
+    _clip: torch.Tensor
+    """The clip applied to the input action."""
 
     def __init__(self, cfg: actions_cfg.JointPositionToLimitsActionCfg, env: ManagerBasedEnv):
         # initialize the action term
@@ -76,6 +78,16 @@ def __init__(self, cfg: actions_cfg.JointPositionToLimitsActionCfg, env: Manager
             self._scale[:, index_list] = torch.tensor(value_list, device=self.device)
         else:
             raise ValueError(f"Unsupported scale type: {type(cfg.scale)}. Supported types are float and dict.")
+        # parse clip
+        if self.cfg.clip is not None:
+            if isinstance(cfg.clip, dict):
+                self._clip = torch.tensor([[-float("inf"), float("inf")]], device=self.device).repeat(
+                    self.num_envs, self.action_dim, 1
+                )
+                index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.clip, self._joint_names)
+                self._clip[:, index_list] = torch.tensor(value_list, device=self.device)
+            else:
+                raise ValueError(f"Unsupported clip type: {type(cfg.clip)}. Supported types are dict.")
 
     """
     Properties.
@@ -102,6 +114,10 @@ def process_actions(self, actions: torch.Tensor):
         self._raw_actions[:] = actions
         # apply affine transformations
         self._processed_actions = self._raw_actions * self._scale
+        if self.cfg.clip is not None:
+            self._processed_actions = torch.clamp(
+                self._processed_actions, min=self._clip[:, :, 0], max=self._clip[:, :, 1]
+            )
         # rescale the position targets if configured
         # this is useful when the input actions are in the range [-1, 1]
         if self.cfg.rescale_to_limits:

source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/non_holonomic_actions.py

Lines changed: 18 additions & 0 deletions
@@ -11,6 +11,7 @@
 
 import omni.log
 
+import omni.isaac.lab.utils.string as string_utils
 from omni.isaac.lab.assets.articulation import Articulation
 from omni.isaac.lab.managers.action_manager import ActionTerm
 from omni.isaac.lab.utils.math import euler_xyz_from_quat
@@ -59,6 +60,8 @@ class NonHolonomicAction(ActionTerm):
     """The scaling factor applied to the input action. Shape is (1, 2)."""
     _offset: torch.Tensor
     """The offset applied to the input action. Shape is (1, 2)."""
+    _clip: torch.Tensor
+    """The clip applied to the input action."""
 
     def __init__(self, cfg: actions_cfg.NonHolonomicActionCfg, env: ManagerBasedEnv):
         # initialize the action term
@@ -104,6 +107,16 @@ def __init__(self, cfg: actions_cfg.NonHolonomicActionCfg, env: ManagerBasedEnv)
         # save the scale and offset as tensors
         self._scale = torch.tensor(self.cfg.scale, device=self.device).unsqueeze(0)
         self._offset = torch.tensor(self.cfg.offset, device=self.device).unsqueeze(0)
+        # parse clip
+        if self.cfg.clip is not None:
+            if isinstance(cfg.clip, dict):
+                self._clip = torch.tensor([[-float("inf"), float("inf")]], device=self.device).repeat(
+                    self.num_envs, self.action_dim, 1
+                )
+                index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.clip, self._joint_names)
+                self._clip[:, index_list] = torch.tensor(value_list, device=self.device)
+            else:
+                raise ValueError(f"Unsupported clip type: {type(cfg.clip)}. Supported types are dict.")
 
     """
     Properties.
@@ -129,6 +142,11 @@ def process_actions(self, actions):
         # store the raw actions
         self._raw_actions[:] = actions
         self._processed_actions = self.raw_actions * self._scale + self._offset
+        # clip actions
+        if self.cfg.clip is not None:
+            self._processed_actions = torch.clamp(
+                self._processed_actions, min=self._clip[:, :, 0], max=self._clip[:, :, 1]
+            )
 
     def apply_actions(self):
         # obtain current heading

source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/task_space_actions.py

Lines changed: 18 additions & 0 deletions
@@ -12,6 +12,7 @@
 import omni.log
 
 import omni.isaac.lab.utils.math as math_utils
+import omni.isaac.lab.utils.string as string_utils
 from omni.isaac.lab.assets.articulation import Articulation
 from omni.isaac.lab.controllers.differential_ik import DifferentialIKController
 from omni.isaac.lab.managers.action_manager import ActionTerm
@@ -42,6 +43,8 @@ class DifferentialInverseKinematicsAction(ActionTerm):
     """The articulation asset on which the action term is applied."""
     _scale: torch.Tensor
     """The scaling factor applied to the input action. Shape is (1, action_dim)."""
+    _clip: torch.Tensor
+    """The clip applied to the input action."""
 
     def __init__(self, cfg: actions_cfg.DifferentialInverseKinematicsActionCfg, env: ManagerBasedEnv):
         # initialize the action term
@@ -101,6 +104,17 @@ def __init__(self, cfg: actions_cfg.DifferentialInverseKinematicsActionCfg, env:
         else:
             self._offset_pos, self._offset_rot = None, None
 
+        # parse clip
+        if self.cfg.clip is not None:
+            if isinstance(cfg.clip, dict):
+                self._clip = torch.tensor([[-float("inf"), float("inf")]], device=self.device).repeat(
+                    self.num_envs, self.action_dim, 1
+                )
+                index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.clip, self._joint_names)
+                self._clip[:, index_list] = torch.tensor(value_list, device=self.device)
+            else:
+                raise ValueError(f"Unsupported clip type: {type(cfg.clip)}. Supported types are dict.")
+
     """
     Properties.
     """
@@ -138,6 +152,10 @@ def process_actions(self, actions: torch.Tensor):
         # store the raw actions
         self._raw_actions[:] = actions
         self._processed_actions[:] = self.raw_actions * self._scale
+        if self.cfg.clip is not None:
+            self._processed_actions = torch.clamp(
+                self._processed_actions, min=self._clip[:, :, 0], max=self._clip[:, :, 1]
+            )
         # obtain quantities from simulation
         ee_pos_curr, ee_quat_curr = self._compute_frame_pose()
         # set command into controller

source/extensions/omni.isaac.lab/omni/isaac/lab/managers/manager_term_cfg.py

Lines changed: 3 additions & 0 deletions
@@ -93,6 +93,9 @@ class for more details.
     debug_vis: bool = False
     """Whether to visualize debug information. Defaults to False."""
 
+    clip: dict[str, tuple] | None = None
+    """Clip range for the action (dict of regex expressions). Defaults to None."""
+
 
 ##
 # Command manager.