# @package _global_
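# (Hydra package directive: `_global_` merges the keys below at the root of
# the composed config, so they override the defaults rather than being nested
# under a config group.)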

# Change the seed to match what PushT eval uses
# (to avoid evaluating on seeds used for generating the training data).
seed: 100000
# Change the dataset repository to the PushT one.
dataset_repo_id: lerobot/pusht

override_dataset_stats:
  observation.image:
    # stats from imagenet, since we use a pretrained vision model
    mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1)
    std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1)
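# (These values override the stats computed from the dataset itself; the
# (c,1,1) shape broadcasts over image height and width during mean_std
# normalization.)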

training:
  offline_steps: 80000
  online_steps: 0
  eval_freq: 10000
  save_freq: 100000
  log_freq: 250
  save_model: true

  batch_size: 8
  lr: 1e-5
  lr_backbone: 1e-5
  weight_decay: 1e-4
  grad_clip_norm: 10
  online_steps_between_rollouts: 1

  delta_timestamps:
    action: "[i / ${fps} for i in range(${policy.chunk_size})]"
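  # (`delta_timestamps` tells the dataloader which timesteps to load relative
  # to each frame; the string is evaluated as a Python list comprehension.
  # Assuming the PushT env's fps of 10, it resolves to [0.0, 0.1, ..., 9.9],
  # pairing every observation with the next chunk_size = 100 actions.)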

eval:
  n_episodes: 50
  batch_size: 50

# See `configuration_act.py` for more details.
policy:
  name: act

  # Input / output structure.
  n_obs_steps: 1
  chunk_size: 100
  n_action_steps: 100
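  # (The policy predicts `chunk_size` actions at once and executes
  # `n_action_steps` of them before predicting again, so `n_action_steps`
  # must not exceed `chunk_size`; here each chunk is run open-loop in full.)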

  input_shapes:
    observation.image: [3, 96, 96]
    observation.state: ["${env.state_dim}"]
  output_shapes:
    action: ["${env.action_dim}"]
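  # (`${env.state_dim}` and `${env.action_dim}` are Hydra interpolations
  # resolved from the env config; for PushT both should resolve to 2, an
  # (x, y) agent position and an (x, y) position target.)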

  # Normalization / Unnormalization
  input_normalization_modes:
    observation.image: mean_std
    # Use min_max normalization just because it's more standard.
    observation.state: min_max
  output_normalization_modes:
    # Use min_max normalization just because it's more standard.
    action: min_max
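  # (mean_std standardizes with the stats above; min_max rescales each
  # dimension to [-1, 1] from the dataset's per-dimension min and max.
  # Unnormalization applies the inverse mapping to the policy's outputs.)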

  # Architecture.
  # Vision backbone.
  vision_backbone: resnet18
  pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
  replace_final_stride_with_dilation: false
  # Transformer layers.
  pre_norm: false
  dim_model: 512
  n_heads: 8
  dim_feedforward: 3200
  feedforward_activation: relu
  n_encoder_layers: 4
  # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
  # that means only the first layer is used. Here we match the original implementation by setting this to 1.
  # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
  n_decoder_layers: 1
  # VAE.
  use_vae: true
  latent_dim: 32
  n_vae_encoder_layers: 4
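  # (With use_vae: true, a transformer encoder compresses each ground-truth
  # action chunk into the 32-dim latent during training; at inference the
  # latent is set to zero, the mean of the prior.)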

  # Inference.
  temporal_ensemble_coeff: null
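  # (null disables temporal ensembling. When set, e.g. to the 0.01 used in
  # the ACT paper, predictions made for the same timestep by successive
  # chunks are averaged with exponential weights exp(-coeff * i); LeRobot
  # then expects n_action_steps: 1 so the ensemble is refreshed every step.)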

  # Training and loss computation.
  dropout: 0.1
  kl_weight: 10.0
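  # (kl_weight scales the KL-divergence term of the VAE objective: the total
  # loss is the L1 reconstruction loss on the action chunk plus kl_weight
  # times the KL term. It has no effect when use_vae is false.)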