diff --git a/examples/2_evaluate_pretrained_policy.py b/examples/2_evaluate_pretrained_policy.py
index bf3c442a..edbbad38 100644
--- a/examples/2_evaluate_pretrained_policy.py
+++ b/examples/2_evaluate_pretrained_policy.py
@@ -44,7 +44,7 @@ pretrained_policy_path = "lerobot/diffusion_pusht"
 # OR a path to a local outputs/train folder.
 # pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")
 
-policy = DiffusionPolicy.from_pretrained(pretrained_policy_path, map_location=device)
+policy = DiffusionPolicy.from_pretrained(pretrained_policy_path)
 
 # Initialize evaluation environment to render two observation types:
 # an image of the scene and state/position of the agent. The environment
diff --git a/tests/test_policies.py b/tests/test_policies.py
index b54810f3..adc55f0b 100644
--- a/tests/test_policies.py
+++ b/tests/test_policies.py
@@ -252,11 +252,12 @@ def test_save_and_load_pretrained(dummy_dataset_metadata, tmp_path, policy_name:
     policy_cfg.input_features = {
         key: ft for key, ft in features.items() if key not in policy_cfg.output_features
     }
-    policy = policy_cls(policy_cfg)  # config.device = gpu
+    policy = policy_cls(policy_cfg)
+    policy.to(policy_cfg.device)
     save_dir = tmp_path / f"test_save_and_load_pretrained_{policy_cls.__name__}"
     policy.save_pretrained(save_dir)
-    policy_ = policy_cls.from_pretrained(save_dir, config=policy_cfg)
-    assert all(torch.equal(p, p_) for p, p_ in zip(policy.parameters(), policy_.parameters(), strict=True))
+    loaded_policy = policy_cls.from_pretrained(save_dir, config=policy_cfg)
+    torch.testing.assert_close(list(policy.parameters()), list(loaded_policy.parameters()), rtol=0, atol=0)
 
 
 @pytest.mark.parametrize("insert_temporal_dim", [False, True])
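
For reference, a minimal usage sketch of the loading pattern the example now follows: call from_pretrained without map_location, then move the policy to the target device with .to(...). The import path and device selection below are assumptions for illustration, not part of the diff.

import torch

# Assumed import path for DiffusionPolicy; adjust to the installed lerobot version.
from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy

# Pick a device explicitly instead of passing map_location to from_pretrained.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the pretrained policy (no map_location argument), then move it to the device.
policy = DiffusionPolicy.from_pretrained("lerobot/diffusion_pusht")
policy.to(device)
policy.eval()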