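"""Evaluate a TDMPC policy in a torchrl environment and optionally save rollout videos.

A minimal evaluation script: it builds the env from the hydra config, loads a trained
TDMPC checkpoint, wraps it as a TensorDictModule, and reports the average reward and
success rate over a number of episodes.
"""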
from pathlib import Path

import hydra
import imageio
import numpy as np
import torch
from tensordict.nn import TensorDictModule
from termcolor import colored
from torchrl.envs import EnvBase

from lerobot.common.envs.factory import make_env
from lerobot.common.tdmpc import TDMPC
from lerobot.common.utils import set_seed


def eval_policy(
    env: EnvBase,
    policy: TensorDictModule = None,
    num_episodes: int = 10,
    max_steps: int = 30,
    save_video: bool = False,
    video_dir: Path = None,
):
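    """Roll out `policy` in `env` for `num_episodes` episodes and return evaluation metrics.

    If `policy` is None, env.rollout() falls back to random actions. Frames are rendered
    for every episode; when `save_video` is True they are written to `video_dir` as mp4 files.
    """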
    rewards = []
    successes = []
    for i in range(num_episodes):
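        # frames rendered over this episode, so they can be written out as a video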
        ep_frames = []

        def rendering_callback(env, td=None):
            nonlocal ep_frames
            frame = env.render()
            ep_frames.append(frame)

        tensordict = env.reset()
        # render first frame before rollout
        rendering_callback(env)

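        # env.rollout() steps the environment with the policy for up to max_steps,
        # invoking the rendering callback after every step; auto_reset=False makes
        # it start from the tensordict returned by env.reset() above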
        rollout = env.rollout(
            max_steps=max_steps,
            policy=policy,
            callback=rendering_callback,
            auto_reset=False,
            tensordict=tensordict,
        )
        ep_reward = rollout["next", "reward"].sum()
        ep_success = rollout["next", "success"].any()
        rewards.append(ep_reward.item())
        successes.append(ep_success.item())
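
        # optionally dump this episode's frames to an mp4 file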
        if save_video:
            # make sure the output directory itself exists before writing into it
            video_dir.mkdir(parents=True, exist_ok=True)
            # TODO(rcadene): make fps configurable
            video_path = video_dir / f"eval_episode_{i}.mp4"
            imageio.mimsave(video_path, np.stack(ep_frames), fps=15)
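
    # aggregate per-episode results into summary metrics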
    metrics = {
        "avg_reward": np.nanmean(rewards),
        "pc_success": np.nanmean(successes) * 100,
    }
    return metrics


@hydra.main(version_base=None, config_name="default", config_path="../configs")
def eval(cfg: dict):
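    """Hydra entry point: build the env, load a trained TDMPC checkpoint, and evaluate it."""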
    assert torch.cuda.is_available()
    set_seed(cfg.seed)
    print(colored("Log dir:", "yellow", attrs=["bold"]), cfg.log_dir)

    env = make_env(cfg)
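
    # NOTE: the checkpoint path below is hardcoded for a specific local setup;
    # point it at your own trained TDMPC checkpoint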
    policy = TDMPC(cfg)
    # ckpt_path = "/home/rcadene/code/fowm/logs/xarm_lift/all/default/2/models/offline.pt"
    ckpt_path = "/home/rcadene/code/fowm/logs/xarm_lift/all/default/2/models/final.pt"
    policy.load(ckpt_path)
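    # wrap the policy in a TensorDictModule so env.rollout() can call it: it reads
    # "observation" and "step_count" from the tensordict and writes back "action"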
    policy = TensorDictModule(
        policy,
        in_keys=["observation", "step_count"],
        out_keys=["action"],
    )

    # policy can be None to roll out a random policy
    metrics = eval_policy(
        env,
        policy=policy,
        num_episodes=10,
        save_video=True,
        video_dir=Path("tmp/2023_01_29_xarm_lift_final"),
    )
    print(metrics)


if __name__ == "__main__":
    eval()