pass entire config to make_optimizer
parent 3034272229
commit 06fc9b89e1
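In short: call sites stop splatting the training sub-config into keyword arguments and instead hand each policy the full Hydra config, from which it reads what it needs under cfg.training. The call-site change (shown in full in the train() hunk below):

    # Before: keyword-expanding only the training sub-tree.
    optimizer, lr_scheduler = policy.make_optimizer_and_scheduler(**cfg.training)
    # After: passing the entire config object.
    optimizer, lr_scheduler = policy.make_optimizer_and_scheduler(cfg)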
@@ -160,9 +160,8 @@ class ACTPolicy(
         return loss_dict

-    def make_optimizer_and_scheduler(self, **kwargs):
+    def make_optimizer_and_scheduler(self, cfg):
         """Create the optimizer and learning rate scheduler for ACT"""
-        lr, lr_backbone, weight_decay = kwargs["lr"], kwargs["lr_backbone"], kwargs["weight_decay"]
         optimizer_params_dicts = [
             {
                 "params": [
@@ -177,10 +176,12 @@ class ACTPolicy(
                     for n, p in self.named_parameters()
                     if n.startswith("model.backbone") and p.requires_grad
                 ],
-                "lr": lr_backbone,
+                "lr": cfg.training.lr_backbone,
             },
         ]
-        optimizer = torch.optim.AdamW(optimizer_params_dicts, lr=lr, weight_decay=weight_decay)
+        optimizer = torch.optim.AdamW(
+            optimizer_params_dicts, lr=cfg.training.lr, weight_decay=cfg.training.weight_decay
+        )
         lr_scheduler = None
         return optimizer, lr_scheduler
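For ACT this keeps two parameter groups: group 0 covers the non-backbone parameters at cfg.training.lr, group 1 the vision backbone at cfg.training.lr_backbone. A minimal sketch of exercising the new signature, assuming an instantiated ACTPolicy named policy and illustrative hyperparameter values (the assertions mirror test_act_backbone_lr at the end of this commit):

    from omegaconf import OmegaConf

    # Illustrative values only; real runs take these from the Hydra config files.
    cfg = OmegaConf.create(
        {"training": {"lr": 1e-5, "lr_backbone": 1e-5, "weight_decay": 1e-4}}
    )
    optimizer, lr_scheduler = policy.make_optimizer_and_scheduler(cfg)
    assert optimizer.param_groups[0]["lr"] == cfg.training.lr
    assert optimizer.param_groups[1]["lr"] == cfg.training.lr_backbone
    assert lr_scheduler is None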
@@ -156,33 +156,22 @@ class DiffusionPolicy(
         loss = self.diffusion.compute_loss(batch)
         return {"loss": loss}

-    def make_optimizer_and_scheduler(self, **kwargs):
+    def make_optimizer_and_scheduler(self, cfg):
         """Create the optimizer and learning rate scheduler for Diffusion policy"""
-        lr, adam_betas, adam_eps, adam_weight_decay = (
-            kwargs["lr"],
-            kwargs["adam_betas"],
-            kwargs["adam_eps"],
-            kwargs["adam_weight_decay"],
-        )
-        lr_scheduler_name, lr_warmup_steps, offline_steps = (
-            kwargs["lr_scheduler"],
-            kwargs["lr_warmup_steps"],
-            kwargs["offline_steps"],
-        )
         optimizer = torch.optim.Adam(
             self.diffusion.parameters(),
-            lr,
-            adam_betas,
-            adam_eps,
-            adam_weight_decay,
+            cfg.training.lr,
+            cfg.training.adam_betas,
+            cfg.training.adam_eps,
+            cfg.training.adam_weight_decay,
         )
         from diffusers.optimization import get_scheduler

         lr_scheduler = get_scheduler(
-            lr_scheduler_name,
+            cfg.training.lr_scheduler,
             optimizer=optimizer,
-            num_warmup_steps=lr_warmup_steps,
-            num_training_steps=offline_steps,
+            num_warmup_steps=cfg.training.lr_warmup_steps,
+            num_training_steps=cfg.training.offline_steps,
         )
         return optimizer, lr_scheduler
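The Adam arguments above are passed positionally; PyTorch's signature is Adam(params, lr, betas, eps, weight_decay, ...), so the call is equivalent to the keyword form below (a sketch with a hypothetical helper name, not code from this commit):

    import torch

    def make_diffusion_adam(params, training_cfg):
        # Keyword-equivalent of the positional torch.optim.Adam call above.
        return torch.optim.Adam(
            params,
            lr=training_cfg.lr,
            betas=training_cfg.adam_betas,
            eps=training_cfg.adam_eps,
            weight_decay=training_cfg.adam_weight_decay,
        )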
@@ -534,10 +534,9 @@ class TDMPCPolicy(
         # we update every step and adjust the decay parameter `alpha` accordingly (0.99 -> 0.995)
         update_ema_parameters(self.model_target, self.model, self.config.target_model_momentum)

-    def make_optimizer_and_scheduler(self, **kwargs):
+    def make_optimizer_and_scheduler(self, cfg):
         """Create the optimizer and learning rate scheduler for TD-MPC"""
-        lr = kwargs["lr"]
-        optimizer = torch.optim.Adam(self.parameters(), lr)
+        optimizer = torch.optim.Adam(self.parameters(), cfg.training.lr)
         lr_scheduler = None
         return optimizer, lr_scheduler
@@ -152,6 +152,12 @@ class VQBeTPolicy(

         return loss_dict

+    def make_optimizer_and_scheduler(self, cfg):
+        """Create the optimizer and learning rate scheduler for VQ-BeT"""
+        optimizer = VQBeTOptimizer(self, cfg)
+        scheduler = VQBeTScheduler(optimizer, cfg)
+        return optimizer, scheduler
+

 class SpatialSoftmax(nn.Module):
     """
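The VQ-BeT-specific wrappers VQBeTOptimizer and VQBeTScheduler likewise consume the policy and the full config directly. Taken together, the four policies converge on one duck-typed interface; a sketch (an assumption of this edit, not code from the commit) of what the training loop relies on:

    from typing import Any, Protocol

    class MakesOptimizerAndScheduler(Protocol):
        # Every policy accepts the full Hydra config and returns an
        # (optimizer, scheduler-or-None) pair.
        def make_optimizer_and_scheduler(self, cfg: Any) -> tuple[Any, Any]: ...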
@@ -281,7 +281,7 @@ def train(cfg: DictConfig, out_dir: str | None = None, job_name: str | None = No
     assert isinstance(policy, nn.Module)
     # Create optimizer and scheduler
     # Temporary hack to move optimizer out of policy
-    optimizer, lr_scheduler = policy.make_optimizer_and_scheduler(**cfg.training)
+    optimizer, lr_scheduler = policy.make_optimizer_and_scheduler(cfg)
     grad_scaler = GradScaler(enabled=cfg.use_amp)

     step = 0  # number of policy updates (forward + backward + optim)
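Since ACT and TD-MPC return lr_scheduler = None, the scheduler half of the pair is optional; a sketch of the guard a consumer stepping it would need:

    # Step the scheduler only when the policy actually provided one.
    if lr_scheduler is not None:
        lr_scheduler.step()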
@@ -39,7 +39,7 @@ def get_policy_stats(env_name, policy_name, extra_overrides):
     dataset = make_dataset(cfg)
     policy = make_policy(cfg, dataset_stats=dataset.stats)
     policy.train()
-    optimizer, _ = policy.make_optimizer_and_scheduler(**cfg.training)
+    optimizer, _ = policy.make_optimizer_and_scheduler(cfg)

     dataloader = torch.utils.data.DataLoader(
         dataset,
@@ -213,7 +213,7 @@ def test_act_backbone_lr():

     dataset = make_dataset(cfg)
     policy = make_policy(hydra_cfg=cfg, dataset_stats=dataset.stats)
-    optimizer, _ = policy.make_optimizer_and_scheduler(**cfg.training)
+    optimizer, _ = policy.make_optimizer_and_scheduler(cfg)
     assert len(optimizer.param_groups) == 2
     assert optimizer.param_groups[0]["lr"] == cfg.training.lr
     assert optimizer.param_groups[1]["lr"] == cfg.training.lr_backbone