# @package _global_

# Defaults for training for the PushT dataset as per https://github.com/real-stanford/diffusion_policy.
# Note: We do not track EMA model weights as we discovered it does not improve the results. See
# https://github.com/huggingface/lerobot/pull/134 for more details.

seed: 100000
dataset_repo_id: lerobot/pusht

training:
  offline_steps: 200000
  online_steps: 0
  eval_freq: 5000
  save_freq: 5000
  log_freq: 250
  save_model: true

  batch_size: 64
  grad_clip_norm: 10
  lr: 1.0e-4
  lr_scheduler: cosine
  lr_warmup_steps: 500
  adam_betas: [0.95, 0.999]
  adam_eps: 1.0e-8
  adam_weight_decay: 1.0e-6
  online_steps_between_rollouts: 1

  delta_timestamps:
    observation.image: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1)]"
    observation.state: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1)]"
    action: "[i / ${fps} for i in range(1 - ${policy.n_obs_steps}, 1 - ${policy.n_obs_steps} + ${policy.horizon})]"
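    # For reference: assuming ${fps} resolves to 10 (the PushT default) and the policy values below,
    # the observation lines evaluate to [-0.1, 0.0] (one past frame plus the current frame) and the
    # action line to [-0.1, 0.0, ..., 1.4], i.e. a 16-step horizon starting one frame in the past.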

eval:
  n_episodes: 50
  batch_size: 50
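  # Presumably the 50 evaluation episodes are rolled out across batch_size parallel environments,
  # so batch_size: 50 runs them all in a single vectorized batch.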

override_dataset_stats:
  # TODO(rcadene, alexander-soare): should we remove image stats as well? do we use a pretrained vision model?
  observation.image:
    mean: [[[0.5]], [[0.5]], [[0.5]]] # (c,1,1)
    std: [[[0.5]], [[0.5]], [[0.5]]] # (c,1,1)
  # TODO(rcadene, alexander-soare): we override state and action stats to use the same as the pretrained model
  # from the original codebase, but we should remove these and train our own pretrained model
  observation.state:
    min: [13.456424, 32.938293]
    max: [496.14618, 510.9579]
  action:
    min: [12.0, 25.0]
    max: [511.0, 511.0]
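  # These values replace the statistics that would otherwise be computed from lerobot/pusht and feed
  # the normalization modes declared under policy below.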

policy:
  name: diffusion

  # Input / output structure.
  n_obs_steps: 2
  horizon: 16
  n_action_steps: 8
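  # As in the original diffusion_policy, the model conditions on the last n_obs_steps observations,
  # predicts a horizon-length action sequence, and executes only n_action_steps of it before
  # predicting again (receding-horizon control).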

  input_shapes:
    # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
    observation.image: [3, 96, 96]
    observation.state: ["${env.state_dim}"]
  output_shapes:
    action: ["${env.action_dim}"]

  # Normalization / Unnormalization
  input_normalization_modes:
    observation.image: mean_std
    observation.state: min_max
  output_normalization_modes:
    action: min_max
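  # With the stats overridden above, mean_std maps the [0, 1] image tensor to [-1, 1] via
  # (x - 0.5) / 0.5, and min_max is expected to rescale state/action from the given min/max into [-1, 1].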

  # Architecture / modeling.
  # Vision backbone.
  vision_backbone: resnet18
  crop_shape: [84, 84]
  crop_is_random: True
  pretrained_backbone_weights: null
  use_group_norm: True
  spatial_softmax_num_keypoints: 32
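  # The 96x96 frames are cropped to 84x84 (randomly during training since crop_is_random is True,
  # with a center crop otherwise). With pretrained_backbone_weights set to null the ResNet-18 is
  # trained from scratch, and use_group_norm swaps its BatchNorm layers for GroupNorm.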
  # Unet.
  down_dims: [512, 1024, 2048]
  kernel_size: 5
  n_groups: 8
  diffusion_step_embed_dim: 128
  use_film_scale_modulation: True
  # Noise scheduler.
  num_train_timesteps: 100
  beta_schedule: squaredcos_cap_v2
  beta_start: 0.0001
  beta_end: 0.02
  prediction_type: epsilon # epsilon / sample
  clip_sample: True
  clip_sample_range: 1.0
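  # With prediction_type: epsilon the network is trained to predict the injected noise rather than
  # the clean action sample, and clip_sample bounds denoised samples to
  # [-clip_sample_range, clip_sample_range], matching the [-1, 1] range of the normalized actions.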

  # Inference
  num_inference_steps: 100

  # Loss computation
  do_mask_loss_for_padding: false
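  # If enabled, this would presumably exclude action steps padded at episode boundaries from the
  # loss; it is left off here.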