# unitree_rl_gym/legged_gym/envs/h1/h1_config.py

from legged_gym.envs.base.legged_robot_config import LeggedRobotCfg, LeggedRobotCfgPPO

class H1RoughCfg( LeggedRobotCfg ):
    class init_state( LeggedRobotCfg.init_state ):
        pos = [0.0, 0.0, 1.0]  # x,y,z [m]
        default_joint_angles = {  # = target angles [rad] when action = 0.0
            'left_hip_yaw_joint': 0.,
            'left_hip_roll_joint': 0,
            'left_hip_pitch_joint': -0.4,
            'left_knee_joint': 0.8,
            'left_ankle_joint': -0.4,
            'right_hip_yaw_joint': 0.,
            'right_hip_roll_joint': 0,
            'right_hip_pitch_joint': -0.4,
            'right_knee_joint': 0.8,
            'right_ankle_joint': -0.4,
            'torso_joint': 0.,
            'left_shoulder_pitch_joint': 0.,
            'left_shoulder_roll_joint': 0,
            'left_shoulder_yaw_joint': 0.,
            'left_elbow_joint': 0.,
            'right_shoulder_pitch_joint': 0.,
            'right_shoulder_roll_joint': 0.0,
            'right_shoulder_yaw_joint': 0.,
            'right_elbow_joint': 0.,
        }
    class control( LeggedRobotCfg.control ):
        # PD Drive parameters:
        control_type = 'P'
        stiffness = {'joint': 20.}  # [N*m/rad]
        damping = {'joint': 0.5}  # [N*m*s/rad]
        # action scale: target angle = actionScale * action + defaultAngle
        action_scale = 0.25
        # decimation: Number of control action updates @ sim DT per policy DT
        decimation = 4
    class asset( LeggedRobotCfg.asset ):
        file = '{LEGGED_GYM_ROOT_DIR}/resources/robots/h1/urdf/h1.urdf'
        name = "h1"
        foot_name = "ankle"  # name of the feet bodies, used to index body state and contact force tensors
        penalize_contacts_on = ["thigh", "calf"]  # contacts on these bodies are penalized (collision reward)
        terminate_after_contacts_on = ["base"]  # contacts on these bodies end the episode
        self_collisions = 1  # 1 to disable, 0 to enable...bitwise filter
    class rewards( LeggedRobotCfg.rewards ):
        soft_dof_pos_limit = 0.9  # fraction of the URDF joint limits; positions beyond it are penalized
        base_height_target = 0.98  # [m]
        class scales( LeggedRobotCfg.rewards.scales ):
            torques = -0.0002
            dof_pos_limits = -10.0

class H1RoughCfgPPO( LeggedRobotCfgPPO ):
    class algorithm( LeggedRobotCfgPPO.algorithm ):
        entropy_coef = 0.01
    class runner( LeggedRobotCfgPPO.runner ):
        run_name = ''
        experiment_name = 'rough_h1'
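
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original config: how the parameters in
# H1RoughCfg.control are typically turned into joint torques by a
# legged_gym-style PD loop when control_type == 'P'. The numpy stand-ins for
# simulator state (dof_pos, dof_vel, actions) are placeholders; in the real
# environment the stiffness/damping dicts are matched to DOFs by substring
# (the single key 'joint' matches every H1 joint name above), the joint
# ordering comes from the simulator rather than the dict, and the torque
# update runs `decimation` (= 4) times per policy step at the simulation dt.
if __name__ == "__main__":
    import numpy as np

    default_dof_pos = np.array(
        list(H1RoughCfg.init_state.default_joint_angles.values()), dtype=np.float64
    )
    kp = H1RoughCfg.control.stiffness['joint']  # [N*m/rad]
    kd = H1RoughCfg.control.damping['joint']    # [N*m*s/rad]

    # Placeholder state: robot at the default pose, at rest, with zero action.
    dof_pos = default_dof_pos.copy()
    dof_vel = np.zeros_like(default_dof_pos)
    actions = np.zeros_like(default_dof_pos)

    # target angle = action_scale * action + default angle (see control above)
    targets = H1RoughCfg.control.action_scale * actions + default_dof_pos
    # PD law: tau = kp * (target - q) - kd * qdot
    torques = kp * (targets - dof_pos) - kd * dof_vel
    print(torques)  # all zeros: zero action at the default pose commands no torque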