diff --git a/legged_gym/legged_gym/scripts/play_vel.py b/legged_gym/legged_gym/scripts/play_vel.py
index 34b6e03..295f798 100644
--- a/legged_gym/legged_gym/scripts/play_vel.py
+++ b/legged_gym/legged_gym/scripts/play_vel.py
@@ -113,7 +113,7 @@ def play(args):
     if "one_obstacle_per_track" in env_cfg.terrain.BarrierTrack_kwargs.keys():
         env_cfg.terrain.BarrierTrack_kwargs.pop("one_obstacle_per_track")
-    env_cfg.terrain.BarrierTrack_kwargs["n_obstacles_per_track"] = 2# 2
+    env_cfg.terrain.BarrierTrack_kwargs["n_obstacles_per_track"] = 1# 2
     env_cfg.commands.ranges.lin_vel_x = [3.0, 3.0] # [1.2, 1.2]
     env_cfg.terrain.BarrierTrack_kwargs['track_block_length']= 2.
     if "distill" in args.task:
diff --git a/rsl_rl/rsl_rl/algorithms/ppo.py b/rsl_rl/rsl_rl/algorithms/ppo.py
index ddb3592..92f7910 100644
--- a/rsl_rl/rsl_rl/algorithms/ppo.py
+++ b/rsl_rl/rsl_rl/algorithms/ppo.py
@@ -238,7 +238,7 @@ class PPO:
         if current_learning_iteration is None:
             vel_loss = 0
         else:
-            vel_loss = torch.square(velocity-2).mean() * np.exp(-0.01 * current_learning_iteration + 165)
+            vel_loss = torch.square(velocity-2).mean() * np.exp(-0.001 * current_learning_iteration + 12)
         vel_loss += torch.square(torch.clamp_max(velocity, 1.) - 1).mean()
         return_ = dict(
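
For context, a minimal sketch (plain NumPy; the schedule constants are taken from the `ppo.py` hunk above, while the iteration values printed below are illustrative, not from the source). Both schedules are exponential decays of the form `exp(a * iteration + b)` multiplying the `(velocity - 2)^2` tracking term: the change lowers the starting weight from `exp(165)` to `exp(12)` and slows the decay tenfold, so the weight crosses 1 near iteration 12000 instead of 16500.

```python
import numpy as np

def old_weight(iteration):
    # Old schedule: starts at exp(165) (~4e71) and halves every
    # ln(2)/0.01 ~= 69 iterations; reaches 1 at iteration 16500.
    return np.exp(-0.01 * iteration + 165)

def new_weight(iteration):
    # New schedule: starts at exp(12) (~1.6e5) and decays 10x more
    # slowly (half-life ~693 iterations); reaches 1 at iteration 12000.
    return np.exp(-0.001 * iteration + 12)

# Illustrative checkpoints only; not taken from a training run.
for it in (0, 5000, 12000, 16500, 20000):
    print(f"iter {it:>6}: old={old_weight(it):.3e}  new={new_weight(it):.3e}")
```

The second term, `torch.square(torch.clamp_max(velocity, 1.) - 1).mean()`, is unchanged by this diff: it penalizes velocities below 1 at every iteration, independent of the annealing schedule.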