diff --git a/roboverse_pack/callback_funcs/humanoid/reset_funcs.py b/roboverse_pack/callback_funcs/humanoid/reset_funcs.py
index 083603b9d..4714fcd6b 100644
--- a/roboverse_pack/callback_funcs/humanoid/reset_funcs.py
+++ b/roboverse_pack/callback_funcs/humanoid/reset_funcs.py
@@ -37,7 +37,7 @@ def random_root_state(
     env.setup_initial_env_states.robots[env.name].root_state[env_ids, 0:3] = positions
     env.setup_initial_env_states.robots[env.name].root_state[env_ids, 3:7] = orientations
     env.setup_initial_env_states.robots[env.name].root_state[env_ids, 7:13] = velocities
-    # # set into the physics simulation
+    # set into the physics simulation
     # env.write_robot_root_state(torch.cat([positions, orientations, velocities], dim=-1), env_ids=env_ids)
diff --git a/roboverse_pack/callback_funcs/humanoid/reward_funcs.py b/roboverse_pack/callback_funcs/humanoid/reward_funcs.py
index 6c3ad78ea..1a35a8bbb 100644
--- a/roboverse_pack/callback_funcs/humanoid/reward_funcs.py
+++ b/roboverse_pack/callback_funcs/humanoid/reward_funcs.py
@@ -11,7 +11,7 @@ def track_lin_vel_xy(env: EnvTypes, env_states: TensorState, std: float) -> torch.Tensor:
     """Reward tracking of linear velocity commands (xy axes) in the gravity aligned robot frame using exponential kernel."""
-    # extract the used quantities (to enable type-hinting)
+    # extract the used quantities (to enable type-hinting).
     robot_state = env_states.robots[env.name]
     base_quat = robot_state.root_state[:, 3:7]
     base_lin_vel = quat_rotate_inverse(base_quat, robot_state.root_state[:, 7:10])