# @package _global_

# Training schedule: offline (dataset) steps only; no online rollout steps.
offline_steps: 80000
online_steps: 0

# Evaluation / checkpointing / logging cadence, in training steps.
eval_episodes: 1
eval_freq: 10000
save_freq: 100000
log_freq: 250

# Length of the predicted action chunk.
horizon: 100
n_obs_steps: 1
# when temporal_agg=False, n_action_steps=horizon
n_action_steps: ${horizon}

policy:
  name: act

  # Explicit null: no pretrained policy checkpoint is loaded
  # (the vision backbone is still pretrained; see pretrained_backbone).
  pretrained_model_path: null

  # Optimization.
  lr: 1e-5
  lr_backbone: 1e-5
  pretrained_backbone: true
  weight_decay: 1e-4
  grad_clip_norm: 10

  # Transformer / backbone architecture.
  backbone: resnet18
  horizon: ${horizon} # chunk_size
  kl_weight: 10
  d_model: 512
  dim_feedforward: 3200
  vae_enc_layers: 4
  enc_layers: 4
  dec_layers: 1
  num_heads: 8
  # camera_names: [top, front_close, left_pillar, right_pillar]
  camera_names: [top]
  dilation: false
  dropout: 0.1
  pre_norm: false
  activation: relu
  latent_dim: 32
  use_vae: true

  batch_size: 8

  # NOTE(review): per_* / balanced_sampling / utd look inherited from a
  # replay-buffer config — confirm they are read by the ACT training loop.
  per_alpha: 0.6
  per_beta: 0.4
  balanced_sampling: false
  utd: 1
  n_obs_steps: ${n_obs_steps}
  temporal_agg: false

  # Proprioceptive state and action dimensionality (14 = two 7-DoF arms,
  # presumably — verify against the robot config).
  state_dim: 14
  action_dim: 14

  # Standard ImageNet channel statistics for backbone input normalization.
  image_normalization:
    mean: [0.485, 0.456, 0.406]
    std: [0.229, 0.224, 0.225]

  # Relative timestamps (seconds) of the frames fetched per sample:
  # current observation only, plus 100 future actions at 0.02 s spacing
  # (50 fps), matching horizon=100.
  delta_timestamps:
    observation.images.top: [0.0]
    observation.state: [0.0]
    action: [0.0, 0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14, 0.16, 0.18, 0.2, 0.22, 0.24, 0.26, 0.28, 0.3, 0.32, 0.34, 0.36, 0.38, 0.4, 0.42, 0.44, 0.46, 0.48, 0.5, 0.52, 0.54, 0.56, 0.58, 0.6, 0.62, 0.64, 0.66, 0.68, 0.70, 0.72, 0.74, 0.76, 0.78, 0.8, 0.82, 0.84, 0.86, 0.88, 0.9, 0.92, 0.94, 0.96, 0.98, 1.0, 1.02, 1.04, 1.06, 1.08, 1.1, 1.12, 1.14, 1.16, 1.18, 1.2, 1.22, 1.24, 1.26, 1.28, 1.3, 1.32, 1.34, 1.36, 1.38, 1.40, 1.42, 1.44, 1.46, 1.48, 1.5, 1.52, 1.54, 1.56, 1.58, 1.6, 1.62, 1.64, 1.66, 1.68, 1.7, 1.72, 1.74, 1.76, 1.78, 1.8, 1.82, 1.84, 1.86, 1.88, 1.90, 1.92, 1.94, 1.96, 1.98]