From 68d02c80cff25cb8852a4c30e3d2162d7f2b2de5 Mon Sep 17 00:00:00 2001
From: Alexander Soare
Date: Wed, 27 Mar 2024 12:03:19 +0000
Subject: [PATCH 1/3] Remove b/c workaround

---
 README.md                          | 1 -
 lerobot/common/datasets/factory.py | 7 +------
 2 files changed, 1 insertion(+), 7 deletions(-)

diff --git a/README.md b/README.md
index 35911869..c621897a 100644
--- a/README.md
+++ b/README.md
@@ -145,7 +145,6 @@ Or you can achieve the same result by executing our script from the command line
 ```bash
 python lerobot/scripts/eval.py \
 --hub-id lerobot/diffusion_policy_pusht_image \
---revision v1.0 \
 eval_episodes=10 \
 hydra.run.dir=outputs/eval/example_hub
 ```

diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py
index 4212e023..04077034 100644
--- a/lerobot/common/datasets/factory.py
+++ b/lerobot/common/datasets/factory.py
@@ -81,13 +81,8 @@ def make_offline_buffer(
     else:
         raise ValueError(cfg.env.name)
 
-    # TODO(rcadene): backward compatiblity to load pretrained pusht policy
-    dataset_id = cfg.get("dataset_id")
-    if dataset_id is None and cfg.env.name == "pusht":
-        dataset_id = "pusht"
-
     offline_buffer = clsfunc(
-        dataset_id=dataset_id,
+        dataset_id=cfg.dataset_id,
         sampler=sampler,
         batch_size=batch_size,
         root=DATA_DIR,

From dc745e3037289a8c93cde66523e801a688321eac Mon Sep 17 00:00:00 2001
From: Alexander Soare
Date: Wed, 27 Mar 2024 13:05:13 +0000
Subject: [PATCH 2/3] Remove unused part of diffusion policy config

---
 lerobot/configs/policy/diffusion.yaml | 26 --------------------------
 1 file changed, 26 deletions(-)

diff --git a/lerobot/configs/policy/diffusion.yaml b/lerobot/configs/policy/diffusion.yaml
index acb368ed..4d6eedca 100644
--- a/lerobot/configs/policy/diffusion.yaml
+++ b/lerobot/configs/policy/diffusion.yaml
@@ -103,29 +103,3 @@ optimizer:
   betas: [0.95, 0.999]
   eps: 1.0e-8
   weight_decay: 1.0e-6
-
-training:
-  device: "cuda:0"
-  seed: 42
-  debug: False
-  resume: True
-  # optimization
-  # lr_scheduler: cosine
-  # lr_warmup_steps: 500
-  num_epochs: 8000
-  # gradient_accumulate_every: 1
-  # EMA destroys performance when used with BatchNorm
-  # replace BatchNorm with GroupNorm.
-  # use_ema: True
-  freeze_encoder: False
-  # training loop control
-  # in epochs
-  rollout_every: 50
-  checkpoint_every: 50
-  val_every: 1
-  sample_every: 5
-  # steps per epoch
-  max_train_steps: null
-  max_val_steps: null
-  # misc
-  tqdm_interval_sec: 1.0

From b6353964ba77f29fea03eaa1b7b2c12a2c6a8ccf Mon Sep 17 00:00:00 2001
From: Alexander Soare
Date: Wed, 27 Mar 2024 13:08:47 +0000
Subject: [PATCH 3/3] fix bug: use provided revision instead of hardcoded one

---
 lerobot/scripts/eval.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py
index 1de0bb0e..28a25e43 100644
--- a/lerobot/scripts/eval.py
+++ b/lerobot/scripts/eval.py
@@ -268,7 +268,7 @@ if __name__ == "__main__":
         # TODO(alexander-soare): Save and load stats in trained model directory.
         stats_path = None
     elif args.hub_id is not None:
-        folder = Path(snapshot_download(args.hub_id, revision="v1.0"))
+        folder = Path(snapshot_download(args.hub_id, revision=args.revision))
         cfg = hydra.initialize(config_path=str(_relative_path_between(folder, Path(__file__).parent)))
         cfg = hydra.compose("config", args.overrides)
         cfg.policy.pretrained_model_path = folder / "model.pt"
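
Note on [PATCH 1/3]: with the fallback removed, `make_offline_buffer` reads `cfg.dataset_id` unconditionally, so every config must now set `dataset_id` explicitly. A minimal sketch of the old versus new lookup, using a hypothetical bare config containing only the keys visible in the diff:

```python
from omegaconf import OmegaConf

cfg = OmegaConf.create({"env": {"name": "pusht"}})  # hypothetical: dataset_id unset

# Old code path: soft lookup plus a hardcoded fallback for pusht.
dataset_id = cfg.get("dataset_id")
if dataset_id is None and cfg.env.name == "pusht":
    dataset_id = "pusht"
assert dataset_id == "pusht"

# New code path: the config is the single source of truth, no fallback.
cfg_new = OmegaConf.create({"env": {"name": "pusht"}, "dataset_id": "pusht"})
dataset_id = cfg_new.dataset_id
assert dataset_id == "pusht"
```

The failure mode changes accordingly: a config missing `dataset_id` now surfaces immediately (Hydra composes configs in struct mode, where accessing an absent key raises) instead of being silently papered over for pusht.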
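Note on [PATCH 3/3]: `snapshot_download` from `huggingface_hub` accepts `revision=None`, which resolves to the repo's default branch, so threading `args.revision` through makes `--revision` a working flag again rather than dead weight. A sketch of the resulting behavior; the argparse scaffolding here is an assumption, with only `--hub-id` and `--revision` taken from the source:

```python
import argparse
from pathlib import Path

from huggingface_hub import snapshot_download

parser = argparse.ArgumentParser()
parser.add_argument("--hub-id", default=None)
parser.add_argument("--revision", default=None)  # e.g. "v1.0", a branch, or a commit SHA
args, _ = parser.parse_known_args()

if args.hub_id is not None:
    # revision=None falls back to the repo's default branch; an explicit value
    # pins the snapshot to that tag/branch/commit, as eval.py now does.
    folder = Path(snapshot_download(args.hub_id, revision=args.revision))
```

Together with the README change in [PATCH 1/3], `--revision v1.0` becomes an opt-in pin rather than a hardcoded default.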