From a0510c0f5e911b8df748ca6b10d90952ab03fb75 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Thu, 3 Apr 2025 05:57:51 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 lerobot/common/optim/schedulers.py       | 8 ++++++--
 lerobot/common/policies/dexvla/README.md | 6 +++---
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/lerobot/common/optim/schedulers.py b/lerobot/common/optim/schedulers.py
index 5609c8b4..e2ebb9e3 100644
--- a/lerobot/common/optim/schedulers.py
+++ b/lerobot/common/optim/schedulers.py
@@ -109,13 +109,16 @@ class CosineDecayWithWarmupSchedulerConfig(LRSchedulerConfig):
 
             return cosine_decay_schedule(current_step)
 
         return LambdaLR(optimizer, lr_lambda, -1)
+
+
 @LRSchedulerConfig.register_subclass("constant_with_warmup")
 @dataclass
 class ConstantWithWarmupSchedulerConfig(LRSchedulerConfig):
     """Used by DexVLA to train Stage2"""
-    num_warmup_steps: int
-    def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR:
+    num_warmup_steps: int
+
+    def build(self, optimizer: Optimizer, num_training_steps: int) -> LambdaLR:
         def lr_lambda(current_step):
             def linear_warmup_schedule(current_step):
                 if current_step <= 0:
@@ -133,6 +136,7 @@ class ConstantWithWarmupSchedulerConfig(LRSchedulerConfig):
 
         return LambdaLR(optimizer, lr_lambda, -1)
 
+
 def save_scheduler_state(scheduler: LRScheduler, save_dir: Path) -> None:
     state_dict = scheduler.state_dict()
     write_json(state_dict, save_dir / SCHEDULER_STATE)
diff --git a/lerobot/common/policies/dexvla/README.md b/lerobot/common/policies/dexvla/README.md
index 4ea17308..cbf94d8b 100644
--- a/lerobot/common/policies/dexvla/README.md
+++ b/lerobot/common/policies/dexvla/README.md
@@ -90,7 +90,7 @@ python lerobot/scripts/train.py \
 --output_dir /path/to/output \
 --steps 10000 \
 --save_freq 1000 \
---optimizer_lr 2e-5 
+--optimizer_lr 2e-5
 ~~~
 
 ### Training Stage 3
@@ -108,7 +108,7 @@ python lerobot/scripts/train.py \
 --output_dir /path/to/output \
 --steps 10000 \
 --save_freq 1000 \
---optimizer_lr 2e-5 
+--optimizer_lr 2e-5
 ~~~
 
 ### Training Time
@@ -133,7 +133,7 @@ python lerobot/scripts/eval.py \
 --policy.qwen2_vl_path /path/to/official/Qwen2-VL-2B-Instruct \
 --env.task AlohaInsertion-v0 \
 --eval.n_episodes 1 \
---eval.batch_size 1 
+--eval.batch_size 1
 ~~~
 
 ### Inference Speed
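
Note (not part of the patch itself): for readers unfamiliar with the scheduler touched above, the sketch below shows the general shape of a constant-with-warmup LambdaLR multiplier, i.e. a linear ramp over `num_warmup_steps` followed by a constant factor of 1.0. It is a minimal illustration of the schedule family that `ConstantWithWarmupSchedulerConfig` configures, not the repository's exact implementation; the warmup length of 100 steps and the one-parameter model are placeholder assumptions.

~~~python
# Illustration only -- a LambdaLR multiplier with linear warmup then a constant
# learning rate. num_warmup_steps = 100 is an assumed value for the demo.
import torch
from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR

num_warmup_steps = 100  # assumed warmup window


def lr_lambda(current_step: int) -> float:
    if current_step < num_warmup_steps:
        # Linear warmup: grow the multiplier from ~0 toward 1.0.
        return float(current_step + 1) / float(num_warmup_steps + 1)
    # Constant phase: keep the base learning rate unchanged afterwards.
    return 1.0


optimizer = SGD([nn.Parameter(torch.zeros(1))], lr=2e-5)
scheduler = LambdaLR(optimizer, lr_lambda, last_epoch=-1)

for step in range(200):
    optimizer.step()
    scheduler.step()
    if step in (0, 50, 99, 150):
        print(step, scheduler.get_last_lr()[0])
~~~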