lerobot/lerobot/configs/policy/diffusion.yaml

# @package _global_
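
# Diffusion Policy training config. The observation/action shapes below (one 3x96x96
# RGB image, a 2-D agent position and 2-D actions) match the PushT setup that the
# normalization note further down refers to.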

shape_meta:
  # acceptable types: rgb, low_dim
  obs:
    image:
      shape: [3, 96, 96]
      type: rgb
    agent_pos:
      shape: [2]
      type: low_dim
  action:
    shape: [2]
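
# Receding-horizon control: the policy conditions on the last n_obs_steps observations,
# predicts a chunk of `horizon` future actions, then executes the first n_action_steps
# of them before replanning.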
horizon: 16
n_obs_steps: 2
n_action_steps: 8
n_latency_steps: 0
dataset_obs_steps: ${n_obs_steps}
past_action_visible: False
keypoint_visible_rate: 1.0
obs_as_global_cond: True

eval_episodes: 50
eval_freq: 5000
save_freq: 5000
log_freq: 250
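
# Offline-only training: 50k optimization steps on the offline dataset, with the
# online interaction phase disabled (online_steps: 0).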
offline_steps: 50000
online_steps: 0

offline_prioritized_sampler: true

policy:
  name: diffusion

  shape_meta: ${shape_meta}

  horizon: ${horizon}
  # n_action_steps: ${eval:'${n_action_steps}+${n_latency_steps}'}
  n_obs_steps: ${n_obs_steps}
  num_inference_steps: 100
  obs_as_global_cond: ${obs_as_global_cond}
  # crop_shape: null
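
  # Conditional 1D U-Net denoiser: diffusion-step embedding size, channel widths per
  # downsampling level, conv kernel size and GroupNorm group count; cond_predict_scale
  # presumably lets the FiLM conditioning predict a scale as well as a bias.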
  diffusion_step_embed_dim: 128
  down_dims: [512, 1024, 2048]
  kernel_size: 5
  n_groups: 8
  cond_predict_scale: True

  pretrained_model_path:

  batch_size: 64
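
  # Prioritized-replay exponents (alpha: prioritization strength, beta: importance-sampling
  # correction); presumably only used when offline_prioritized_sampler is enabled.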
  per_alpha: 0.6
  per_beta: 0.4

  balanced_sampling: false
  utd: 1
  offline_steps: ${offline_steps}
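
  # Keep an exponential moving average of the denoiser weights (decay 0.999); the EMA
  # copy is typically the one used for evaluation rollouts.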
  ema:
    enable: true
    rate: 0.999

  lr_scheduler: cosine
  lr_warmup_steps: 500
  grad_clip_norm: 10
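
# DDPM noise scheduler from the diffusers library, instantiated through Hydra's _target_
# mechanism (roughly `hydra.utils.instantiate(<this block>)`). The 100 training timesteps
# match the 100 denoising steps used at inference (num_inference_steps above).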
noise_scheduler:
  _target_: diffusers.schedulers.scheduling_ddpm.DDPMScheduler
  num_train_timesteps: 100
  beta_start: 0.0001
  beta_end: 0.02
  beta_schedule: squaredcos_cap_v2
  variance_type: fixed_small # Yilun's paper uses fixed_small_log instead, but it easily causes NaNs
  clip_sample: True # required when predict_epsilon=False
  prediction_type: epsilon # or sample
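
# Image preprocessing for the policy's visual input: random 84x84 crops of the 96x96
# frames during training (the original implementation presumably falls back to a fixed
# center crop at evaluation time, per the comment below).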
obs_encoder:
  shape_meta: ${shape_meta}
  # resize_shape: null
  crop_shape: [84, 84]
  # constant center crop
  random_crop: True
  use_group_norm: True
  share_rgb_model: False
  norm_mean_std: [0.5, 0.5] # for PushT the original impl normalizes to [-1, 1] (maybe not the case for robomimic envs)
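
# Vision backbone: a ResNet-18 trained from scratch, with its feature map reduced to 32
# keypoints (presumably via a spatial softmax, as in the original Diffusion Policy encoder).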
rgb_model:
  model_name: resnet18
  pretrained: false
  num_keypoints: 32
  relu: true
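
# AdamW settings (these appear to match the original Diffusion Policy implementation;
# note beta1 = 0.95 rather than the usual 0.9).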
optimizer:
  _target_: torch.optim.AdamW
  lr: 1.0e-4
  betas: [0.95, 0.999]
  eps: 1.0e-8
  weight_decay: 1.0e-6

training:
  device: "cuda:0"
  seed: 42
  debug: False
  resume: True
  # optimization
  # lr_scheduler: cosine
  # lr_warmup_steps: 500
  num_epochs: 8000
  # gradient_accumulate_every: 1
  # EMA destroys performance when used with BatchNorm
  # replace BatchNorm with GroupNorm.
  # use_ema: True
  freeze_encoder: False
  # training loop control
  # in epochs
  rollout_every: 50
  checkpoint_every: 50
  val_every: 1
  sample_every: 5
  # steps per epoch
  max_train_steps: null
  max_val_steps: null
  # misc
  tqdm_interval_sec: 1.0