"""
This script demonstrates how to evaluate a pretrained policy from the HuggingFace Hub or from your local
training outputs directory. In the latter case, you might want to run examples/3_train_policy.py first.
"""

from pathlib import Path

from huggingface_hub import snapshot_download

from lerobot.scripts.eval import eval

# Get a pretrained policy from the hub.
pretrained_policy_name = "lerobot/diffusion_pusht"
pretrained_policy_path = Path(snapshot_download(pretrained_policy_name))
# OR uncomment the following to evaluate a policy from the local outputs/train folder.
# pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")

# Override some config parameters to do with evaluation.
overrides = [
"eval.n_episodes=10",
"eval.batch_size=10",
"device=cuda",
]
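# Note: `eval.n_episodes` sets how many evaluation episodes are run, `eval.batch_size` controls how
# many simulation environments are rolled out in parallel, and `device` selects the compute device
# (use "device=cpu" if no CUDA-capable GPU is available).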
# Evaluate the policy and save the outputs including metrics and videos.
# TODO(rcadene, alexander-soare): don't call eval, but add the minimal code snippet needed to roll out the policy.
# Forward the Hydra-style overrides defined above (the `config_overrides` keyword is assumed from the
# signature of lerobot.scripts.eval.eval).
eval(pretrained_policy_path=pretrained_policy_path, config_overrides=overrides)
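# The TODO above asks for a minimal rollout snippet rather than a call to `eval`. The commented-out
# block below is a rough sketch of such a rollout. It assumes the `gym_pusht` package is installed
# (providing the "gym_pusht/PushT-v0" environment), that the downloaded checkpoint can be loaded with
# `DiffusionPolicy.from_pretrained`, and that the policy expects LeRobot-style observation keys
# ("observation.state", "observation.image"); module paths and keys may differ between versions, so
# treat this as a starting point rather than a drop-in replacement. Uncomment to try it.
#
# import gym_pusht  # noqa: F401  # registers the "gym_pusht/PushT-v0" environment
# import gymnasium as gym
# import torch
#
# from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
#
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#
# # Load the pretrained policy and reset its internal action queue.
# policy = DiffusionPolicy.from_pretrained(pretrained_policy_path)
# policy.to(device)
# policy.eval()
# policy.reset()
#
# # Create a single PushT environment that returns both pixels and the agent position.
# env = gym.make("gym_pusht/PushT-v0", obs_type="pixels_agent_pos", max_episode_steps=300)
# numpy_observation, info = env.reset(seed=42)
#
# done = False
# total_reward = 0.0
# while not done:
#     # Convert the raw observation into the batched, channel-first float tensors the policy expects.
#     state = torch.from_numpy(numpy_observation["agent_pos"]).float().unsqueeze(0).to(device)
#     image = torch.from_numpy(numpy_observation["pixels"]).float() / 255.0
#     image = image.permute(2, 0, 1).unsqueeze(0).to(device)
#     observation = {"observation.state": state, "observation.image": image}
#
#     # Query the policy for the next action and step the environment with it.
#     with torch.inference_mode():
#         action = policy.select_action(observation)
#     numpy_observation, reward, terminated, truncated, info = env.step(action.squeeze(0).cpu().numpy())
#
#     total_reward += reward
#     done = terminated or truncated
#
# print(f"Rollout finished with total reward {total_reward:.3f}")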