diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py
index 4ae161f6..10106fe9 100644
--- a/lerobot/common/datasets/factory.py
+++ b/lerobot/common/datasets/factory.py
@@ -86,11 +86,10 @@ def make_dataset(
             ]
         )

-    delta_timestamps = cfg.policy.get("delta_timestamps")
-    if delta_timestamps is not None:
-        for key in delta_timestamps:
-            if isinstance(delta_timestamps[key], str):
-                delta_timestamps[key] = eval(delta_timestamps[key])
+    delta_timestamps = cfg.policy.delta_timestamps
+    for key in delta_timestamps:
+        if isinstance(delta_timestamps[key], str):
+            delta_timestamps[key] = eval(delta_timestamps[key])

     dataset = clsfunc(
         dataset_id=cfg.dataset_id,
diff --git a/lerobot/common/policies/act/configuration_act.py b/lerobot/common/policies/act/configuration_act.py
new file mode 100644
index 00000000..a3dc1590
--- /dev/null
+++ b/lerobot/common/policies/act/configuration_act.py
@@ -0,0 +1,68 @@
+from dataclasses import dataclass
+
+
+@dataclass
+class ActConfig:
+    """
+    TODO(now): Document all variables
+    TODO(now): Pick sensible defaults for a use case?
+    """
+
+    # Environment.
+    state_dim: int
+    action_dim: int
+
+    # Inputs / output structure.
+    n_obs_steps: int
+    camera_names: list[str]
+    chunk_size: int
+    n_action_steps: int
+
+    # Vision preprocessing.
+    image_normalization_mean: tuple[float, float, float]
+    image_normalization_std: tuple[float, float, float]
+
+    # Architecture.
+    # Vision backbone.
+    vision_backbone: str
+    use_pretrained_backbone: bool
+    replace_final_stride_with_dilation: int
+    # Transformer layers.
+    pre_norm: bool
+    d_model: int
+    n_heads: int
+    dim_feedforward: int
+    feedforward_activation: str
+    n_encoder_layers: int
+    n_decoder_layers: int
+    # VAE.
+    use_vae: bool
+    latent_dim: int
+    n_vae_encoder_layers: int
+
+    # Inference.
+    use_temporal_aggregation: bool
+
+    # Training and loss computation.
+    dropout: float
+    kl_weight: float
+
+    # ---
+    # TODO(alexander-soare): Remove these from the policy config.
+    batch_size: int
+    lr: float
+    lr_backbone: float
+    weight_decay: float
+    grad_clip_norm: float
+    utd: int
+
+    def __post_init__(self):
+        """Input validation."""
+        if not self.vision_backbone.startswith("resnet"):
+            raise ValueError("`vision_backbone` must be one of the ResNet variants.")
+        if self.use_temporal_aggregation:
+            raise NotImplementedError("Temporal aggregation is not yet implemented.")
+        if self.n_action_steps > self.chunk_size:
+            raise ValueError(
+                "The chunk size is the upper bound for the number of action steps per model invocation."
+            )
diff --git a/lerobot/common/policies/act/modeling_act.py b/lerobot/common/policies/act/modeling_act.py
new file mode 100644
index 00000000..769c9470
--- /dev/null
+++ b/lerobot/common/policies/act/modeling_act.py
@@ -0,0 +1,626 @@
+"""Action Chunking Transformer Policy
+
+As per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware (https://arxiv.org/abs/2304.13705).
+The majority of changes here involve removing unused code, unifying naming, and adding helpful comments.
+""" + +import math +import time +from collections import deque +from itertools import chain +from typing import Callable + +import einops +import numpy as np +import torch +import torch.nn.functional as F # noqa: N812 +import torchvision +import torchvision.transforms as transforms +from torch import Tensor, nn +from torchvision.models._utils import IntermediateLayerGetter +from torchvision.ops.misc import FrozenBatchNorm2d + +from lerobot.common.policies.act.configuration_act import ActConfig + + +class ActPolicy(nn.Module): + """ + Action Chunking Transformer Policy as per Learning Fine-Grained Bimanual Manipulation with Low-Cost + Hardware (paper: https://arxiv.org/abs/2304.13705, code: https://github.com/tonyzhaozh/act) + + Note: In this code we use the terms `vae_encoder`, 'encoder', `decoder`. The meanings are as follows. + - The `vae_encoder` is, as per the literature around variational auto-encoders (VAE), the part of the + model that encodes the target data (a sequence of actions), and the condition (the robot + joint-space). + - A transformer with an `encoder` (not the VAE encoder) and `decoder` (not the VAE decoder) with + cross-attention is used as the VAE decoder. For these terms, we drop the `vae_` prefix because we + have an option to train this model without the variational objective (in which case we drop the + `vae_encoder` altogether, and nothing about this model has anything to do with a VAE). + + Transformer + Used alone for inference + (acts as VAE decoder + during training) + ┌───────────────────────┐ + │ Outputs │ + │ ▲ │ + │ ┌─────►┌───────┐ │ + ┌──────┐ │ │ │Transf.│ │ + │ │ │ ├─────►│decoder│ │ + ┌────┴────┐ │ │ │ │ │ │ + │ │ │ │ ┌───┴───┬─►│ │ │ + │ VAE │ │ │ │ │ └───────┘ │ + │ encoder │ │ │ │Transf.│ │ + │ │ │ │ │encoder│ │ + └───▲─────┘ │ │ │ │ │ + │ │ │ └───▲───┘ │ + │ │ │ │ │ + inputs └─────┼─────┘ │ + │ │ + └───────────────────────┘ + """ + + name = "act" + _multiple_obs_steps_not_handled_msg = "ActPolicy does not handle multiple observation steps." + + def __init__(self, cfg: ActConfig): + """ + TODO(alexander-soare): Add documentation for all parameters once we have model configs established. + """ + super().__init__() + if getattr(cfg, "n_obs_steps", 1) != 1: + raise ValueError(self._multiple_obs_steps_not_handled_msg) + self.cfg = cfg + + # BERT style VAE encoder with input [cls, *joint_space_configuration, *action_sequence]. + # The cls token forms parameters of the latent's distribution (like this [*means, *log_variances]). + if self.cfg.use_vae: + self.vae_encoder = _TransformerEncoder(cfg) + self.vae_encoder_cls_embed = nn.Embedding(1, cfg.d_model) + # Projection layer for joint-space configuration to hidden dimension. + self.vae_encoder_robot_state_input_proj = nn.Linear(cfg.state_dim, cfg.d_model) + # Projection layer for action (joint-space target) to hidden dimension. + self.vae_encoder_action_input_proj = nn.Linear(cfg.state_dim, cfg.d_model) + self.latent_dim = cfg.latent_dim + # Projection layer from the VAE encoder's output to the latent distribution's parameter space. + self.vae_encoder_latent_output_proj = nn.Linear(cfg.d_model, self.latent_dim * 2) + # Fixed sinusoidal positional embedding the whole input to the VAE encoder. Unsqueeze for batch + # dimension. + self.register_buffer( + "vae_encoder_pos_enc", + _create_sinusoidal_position_embedding(1 + 1 + cfg.chunk_size, cfg.d_model).unsqueeze(0), + ) + + # Backbone for image feature extraction. 
+        self.image_normalizer = transforms.Normalize(
+            mean=cfg.image_normalization_mean, std=cfg.image_normalization_std
+        )
+        backbone_model = getattr(torchvision.models, cfg.vision_backbone)(
+            replace_stride_with_dilation=[False, False, cfg.replace_final_stride_with_dilation],
+            pretrained=cfg.use_pretrained_backbone,
+            norm_layer=FrozenBatchNorm2d,
+        )
+        # Note: The assumption here is that we are using a ResNet model (and hence layer4 is the final feature
+        # map).
+        # Note: The forward method of this returns a dict: {"feature_map": output}.
+        self.backbone = IntermediateLayerGetter(backbone_model, return_layers={"layer4": "feature_map"})
+
+        # Transformer (acts as VAE decoder when training with the variational objective).
+        self.encoder = _TransformerEncoder(cfg)
+        self.decoder = _TransformerDecoder(cfg)
+
+        # Transformer encoder input projections. The tokens will be structured like
+        # [latent, robot_state, image_feature_map_pixels].
+        self.encoder_robot_state_input_proj = nn.Linear(cfg.state_dim, cfg.d_model)
+        self.encoder_latent_input_proj = nn.Linear(self.latent_dim, cfg.d_model)
+        self.encoder_img_feat_input_proj = nn.Conv2d(
+            backbone_model.fc.in_features, cfg.d_model, kernel_size=1
+        )
+        # Transformer encoder positional embeddings.
+        self.encoder_robot_and_latent_pos_embed = nn.Embedding(2, cfg.d_model)
+        self.encoder_cam_feat_pos_embed = _SinusoidalPositionEmbedding2D(cfg.d_model // 2)
+
+        # Transformer decoder.
+        # Learnable positional embedding for the transformer's decoder (in the style of DETR object queries).
+        self.decoder_pos_embed = nn.Embedding(cfg.chunk_size, cfg.d_model)
+
+        # Final action regression head on the output of the transformer's decoder.
+        self.action_head = nn.Linear(cfg.d_model, cfg.action_dim)
+
+        self._reset_parameters()
+        self._create_optimizer()
+
+    def _create_optimizer(self):
+        optimizer_params_dicts = [
+            {
+                "params": [
+                    p for n, p in self.named_parameters() if not n.startswith("backbone") and p.requires_grad
+                ]
+            },
+            {
+                "params": [
+                    p for n, p in self.named_parameters() if n.startswith("backbone") and p.requires_grad
+                ],
+                "lr": self.cfg.lr_backbone,
+            },
+        ]
+        self.optimizer = torch.optim.AdamW(
+            optimizer_params_dicts, lr=self.cfg.lr, weight_decay=self.cfg.weight_decay
+        )
+
+    def _reset_parameters(self):
+        """Xavier-uniform initialization of the transformer parameters as in the original code."""
+        for p in chain(self.encoder.parameters(), self.decoder.parameters()):
+            if p.dim() > 1:
+                nn.init.xavier_uniform_(p)
+
+    def reset(self):
+        """This should be called whenever the environment is reset."""
+        if self.cfg.n_action_steps is not None:
+            self._action_queue = deque([], maxlen=self.cfg.n_action_steps)
+
+    @torch.no_grad
+    def select_action(self, batch: dict[str, Tensor], **_) -> Tensor:
+        """
+        This method wraps `select_actions` in order to return one action at a time for execution in the
+        environment. It works by managing the actions in a queue and only calling `select_actions` when the
+        queue is empty.
+        """
+        if len(self._action_queue) == 0:
+            # `select_actions` returns a (batch_size, n_action_steps, *) tensor, but the queue effectively has
+            # shape (n_action_steps, batch_size, *), hence the transpose.
+            self._action_queue.extend(self.select_actions(batch).transpose(0, 1))
+        return self._action_queue.popleft()
+
+    @torch.no_grad
+    def select_actions(self, batch: dict[str, Tensor]) -> Tensor:
+        """Use the action chunking transformer to generate a sequence of actions."""
+        self.eval()
+        self._preprocess_batch(batch, add_obs_steps_dim=True)
+
+        action = self.forward(batch, return_loss=False)
+
+        return action[:, : self.cfg.n_action_steps]
+
+    def __call__(self, *args, **kwargs) -> dict:
+        # TODO(alexander-soare): Temporary bridge until we know what to do about the `update` method.
+        return self.update(*args, **kwargs)
+
+    def _preprocess_batch(
+        self, batch: dict[str, Tensor], add_obs_steps_dim: bool = False
+    ) -> dict[str, Tensor]:
+        """
+        This function expects `batch` to have (at least):
+        {
+            "observation.state": (B, 1, J) OR (B, J) tensor of robot states (joint configuration).
+            "observation.images.top": (B, 1, C, H, W) OR (B, C, H, W) tensor of images.
+        }
+        """
+        if add_obs_steps_dim:
+            # Add a dimension for the observation steps. Since n_obs_steps > 1 is not supported right now,
+            # this just amounts to an unsqueeze.
+            for k in batch:
+                if k.startswith("observation."):
+                    batch[k] = batch[k].unsqueeze(1)
+
+        if batch["observation.state"].shape[1] != 1:
+            raise ValueError(self._multiple_obs_steps_not_handled_msg)
+        batch["observation.state"] = batch["observation.state"].squeeze(1)
+        # TODO(alexander-soare): generalize this to multiple images.
+        assert (
+            sum(k.startswith("observation.images.") and not k.endswith("is_pad") for k in batch) == 1
+        ), "ACT only handles one image for now."
+        # Note: no squeeze is required for "observation.images.top" because then we'd have to unsqueeze to get
+        # the image index dimension.
+
+    def update(self, batch, **_) -> dict:
+        """Run the model in train mode, compute the loss, and do an optimization step."""
+        start_time = time.time()
+        self._preprocess_batch(batch)
+
+        self.train()
+
+        num_slices = self.cfg.batch_size
+        batch_size = self.cfg.chunk_size * num_slices
+
+        assert batch_size % self.cfg.chunk_size == 0
+        assert batch_size % num_slices == 0
+
+        loss = self.forward(batch, return_loss=True)["loss"]
+        loss.backward()
+
+        grad_norm = torch.nn.utils.clip_grad_norm_(
+            self.parameters(),
+            self.cfg.grad_clip_norm,
+            error_if_nonfinite=False,
+        )
+
+        self.optimizer.step()
+        self.optimizer.zero_grad()
+
+        info = {
+            "loss": loss.item(),
+            "grad_norm": float(grad_norm),
+            "lr": self.cfg.lr,
+            "update_s": time.time() - start_time,
+        }
+
+        return info
+
+    def forward(self, batch: dict[str, Tensor], return_loss: bool = False) -> dict | Tensor:
+        """A forward pass through the DNN part of this policy with optional loss computation."""
+        images = self.image_normalizer(batch["observation.images.top"])
+
+        if return_loss:  # training time
+            actions_hat, (mu_hat, log_sigma_x2_hat) = self._forward(
+                batch["observation.state"], images, batch["action"]
+            )
+
+            l1_loss = (
+                F.l1_loss(batch["action"], actions_hat, reduction="none")
+                * ~batch["action_is_pad"].unsqueeze(-1)
+            ).mean()
+
+            loss_dict = {}
+            loss_dict["l1"] = l1_loss
+            if self.cfg.use_vae:
+                # Calculate Dₖₗ(latent_pdf || standard_normal). Note: After computing the KL-divergence for
+                # each dimension independently, we sum over the latent dimension to get the total
+                # KL-divergence per batch element, then take the mean over the batch.
+                # (See App. B of https://arxiv.org/abs/1312.6114 for more details).
+                mean_kld = (
+                    (-0.5 * (1 + log_sigma_x2_hat - mu_hat.pow(2) - (log_sigma_x2_hat).exp())).sum(-1).mean()
+                )
+                loss_dict["kl"] = mean_kld
+                loss_dict["loss"] = loss_dict["l1"] + loss_dict["kl"] * self.cfg.kl_weight
+            else:
+                loss_dict["loss"] = loss_dict["l1"]
+            return loss_dict
+        else:
+            action, _ = self._forward(batch["observation.state"], images)
+            return action
+
+    def _forward(
+        self, robot_state: Tensor, image: Tensor, actions: Tensor | None = None
+    ) -> tuple[Tensor, tuple[Tensor | None, Tensor | None]]:
+        """
+        Args:
+            robot_state: (B, J) batch of robot joint configurations.
+            image: (B, N, C, H, W) batch of N camera frames.
+            actions: (B, S, A) batch of actions from the target dataset which must be provided if the
+                VAE is enabled and the model is in training mode.
+        Returns:
+            (B, S, A) batch of action sequences
+            Tuple containing the latent PDF's parameters (mean, log(σ²)) both as (B, L) tensors where L is the
+                latent dimension.
+        """
+        if self.cfg.use_vae and self.training:
+            assert (
+                actions is not None
+            ), "actions must be provided when using the variational objective in training mode."
+
+        batch_size = robot_state.shape[0]
+
+        # Prepare the latent for input to the transformer encoder.
+        if self.cfg.use_vae and actions is not None:
+            # Prepare the input to the VAE encoder: [cls, *joint_space_configuration, *action_sequence].
+            cls_embed = einops.repeat(
+                self.vae_encoder_cls_embed.weight, "1 d -> b 1 d", b=batch_size
+            )  # (B, 1, D)
+            robot_state_embed = self.vae_encoder_robot_state_input_proj(robot_state).unsqueeze(1)  # (B, 1, D)
+            action_embed = self.vae_encoder_action_input_proj(actions)  # (B, S, D)
+            vae_encoder_input = torch.cat([cls_embed, robot_state_embed, action_embed], axis=1)  # (B, S+2, D)
+
+            # Prepare fixed positional embedding.
+            # Note: detach() shouldn't be necessary but leaving it the same as the original code just in case.
+            pos_embed = self.vae_encoder_pos_enc.clone().detach()  # (1, S+2, D)
+
+            # Forward pass through VAE encoder to get the latent PDF parameters.
+            cls_token_out = self.vae_encoder(
+                vae_encoder_input.permute(1, 0, 2), pos_embed=pos_embed.permute(1, 0, 2)
+            )[0]  # select the class token, with shape (B, D)
+            latent_pdf_params = self.vae_encoder_latent_output_proj(cls_token_out)
+            mu = latent_pdf_params[:, : self.latent_dim]
+            # This is 2log(sigma). Done this way to match the original implementation.
+            log_sigma_x2 = latent_pdf_params[:, self.latent_dim :]
+
+            # Sample the latent with the reparameterization trick.
+            latent_sample = mu + log_sigma_x2.div(2).exp() * torch.randn_like(mu)
+        else:
+            # When not using the VAE encoder, we set the latent to be all zeros.
+            mu = log_sigma_x2 = None
+            latent_sample = torch.zeros([batch_size, self.latent_dim], dtype=torch.float32).to(
+                robot_state.device
+            )
+
+        # Prepare all other transformer encoder inputs.
+        # Camera observation features and positional embeddings.
+        all_cam_features = []
+        all_cam_pos_embeds = []
+        for cam_id, _ in enumerate(self.cfg.camera_names):
+            cam_features = self.backbone(image[:, cam_id])["feature_map"]
+            cam_pos_embed = self.encoder_cam_feat_pos_embed(cam_features).to(dtype=cam_features.dtype)
+            cam_features = self.encoder_img_feat_input_proj(cam_features)  # (B, C, h, w)
+            all_cam_features.append(cam_features)
+            all_cam_pos_embeds.append(cam_pos_embed)
+        # Concatenate camera observation feature maps and positional embeddings along the width dimension.
+        encoder_in = torch.cat(all_cam_features, axis=3)
+        cam_pos_embed = torch.cat(all_cam_pos_embeds, axis=3)
+
+        # Get positional embeddings for robot state and latent.
+        robot_state_embed = self.encoder_robot_state_input_proj(robot_state)
+        latent_embed = self.encoder_latent_input_proj(latent_sample)
+
+        # Stack encoder input and positional embeddings moving to (S, B, C).
+        encoder_in = torch.cat(
+            [
+                torch.stack([latent_embed, robot_state_embed], axis=0),
+                encoder_in.flatten(2).permute(2, 0, 1),
+            ]
+        )
+        pos_embed = torch.cat(
+            [
+                self.encoder_robot_and_latent_pos_embed.weight.unsqueeze(1),
+                cam_pos_embed.flatten(2).permute(2, 0, 1),
+            ],
+            axis=0,
+        )
+
+        # Forward pass through the transformer modules.
+        encoder_out = self.encoder(encoder_in, pos_embed=pos_embed)
+        decoder_in = torch.zeros(
+            (self.cfg.chunk_size, batch_size, self.cfg.d_model),
+            dtype=pos_embed.dtype,
+            device=pos_embed.device,
+        )
+        decoder_out = self.decoder(
+            decoder_in,
+            encoder_out,
+            encoder_pos_embed=pos_embed,
+            decoder_pos_embed=self.decoder_pos_embed.weight.unsqueeze(1),
+        )
+
+        # Move back to (B, S, C).
+        decoder_out = decoder_out.transpose(0, 1)
+
+        actions = self.action_head(decoder_out)
+
+        return actions, (mu, log_sigma_x2)
+
+    def save(self, fp):
+        torch.save(self.state_dict(), fp)
+
+    def load(self, fp):
+        d = torch.load(fp)
+        self.load_state_dict(d)
+
+
+class _TransformerEncoder(nn.Module):
+    """Convenience module for running multiple encoder layers, maybe followed by normalization."""
+
+    def __init__(self, cfg: ActConfig):
+        super().__init__()
+        self.layers = nn.ModuleList([_TransformerEncoderLayer(cfg) for _ in range(cfg.n_encoder_layers)])
+        self.norm = nn.LayerNorm(cfg.d_model) if cfg.pre_norm else nn.Identity()
+
+    def forward(self, x: Tensor, pos_embed: Tensor | None = None) -> Tensor:
+        for layer in self.layers:
+            x = layer(x, pos_embed=pos_embed)
+        x = self.norm(x)
+        return x
+
+
+class _TransformerEncoderLayer(nn.Module):
+    def __init__(self, cfg: ActConfig):
+        super().__init__()
+        self.self_attn = nn.MultiheadAttention(cfg.d_model, cfg.n_heads, dropout=cfg.dropout)
+
+        # Feed forward layers.
+        self.linear1 = nn.Linear(cfg.d_model, cfg.dim_feedforward)
+        self.dropout = nn.Dropout(cfg.dropout)
+        self.linear2 = nn.Linear(cfg.dim_feedforward, cfg.d_model)
+
+        self.norm1 = nn.LayerNorm(cfg.d_model)
+        self.norm2 = nn.LayerNorm(cfg.d_model)
+        self.dropout1 = nn.Dropout(cfg.dropout)
+        self.dropout2 = nn.Dropout(cfg.dropout)
+
+        self.activation = _get_activation_fn(cfg.feedforward_activation)
+        self.pre_norm = cfg.pre_norm
+
+    def forward(self, x, pos_embed: Tensor | None = None) -> Tensor:
+        skip = x
+        if self.pre_norm:
+            x = self.norm1(x)
+        q = k = x if pos_embed is None else x + pos_embed
+        x = self.self_attn(q, k, value=x)[0]  # select just the output, not the attention weights
+        x = skip + self.dropout1(x)
+        if self.pre_norm:
+            skip = x
+            x = self.norm2(x)
+        else:
+            x = self.norm1(x)
+            skip = x
+        x = self.linear2(self.dropout(self.activation(self.linear1(x))))
+        x = skip + self.dropout2(x)
+        if not self.pre_norm:
+            x = self.norm2(x)
+        return x
+
+
+class _TransformerDecoder(nn.Module):
+    def __init__(self, cfg: ActConfig):
+        """Convenience module for running multiple decoder layers followed by normalization."""
+        super().__init__()
+        self.layers = nn.ModuleList([_TransformerDecoderLayer(cfg) for _ in range(cfg.n_decoder_layers)])
+        self.norm = nn.LayerNorm(cfg.d_model)
+
+    def forward(
+        self,
+        x: Tensor,
+        encoder_out: Tensor,
+        decoder_pos_embed: Tensor | None = None,
+        encoder_pos_embed: Tensor | None = None,
+    ) -> Tensor:
+        for layer in self.layers:
+            x = layer(
+                x, encoder_out, decoder_pos_embed=decoder_pos_embed, encoder_pos_embed=encoder_pos_embed
+            )
+        if self.norm is not None:
+            x = self.norm(x)
+        return x
+
+
+class _TransformerDecoderLayer(nn.Module):
+    def __init__(self, cfg: ActConfig):
+        super().__init__()
+        self.self_attn = nn.MultiheadAttention(cfg.d_model, cfg.n_heads, dropout=cfg.dropout)
+        self.multihead_attn = nn.MultiheadAttention(cfg.d_model, cfg.n_heads, dropout=cfg.dropout)
+
+        # Feed forward layers.
+        self.linear1 = nn.Linear(cfg.d_model, cfg.dim_feedforward)
+        self.dropout = nn.Dropout(cfg.dropout)
+        self.linear2 = nn.Linear(cfg.dim_feedforward, cfg.d_model)
+
+        self.norm1 = nn.LayerNorm(cfg.d_model)
+        self.norm2 = nn.LayerNorm(cfg.d_model)
+        self.norm3 = nn.LayerNorm(cfg.d_model)
+        self.dropout1 = nn.Dropout(cfg.dropout)
+        self.dropout2 = nn.Dropout(cfg.dropout)
+        self.dropout3 = nn.Dropout(cfg.dropout)
+
+        self.activation = _get_activation_fn(cfg.feedforward_activation)
+        self.pre_norm = cfg.pre_norm
+
+    def maybe_add_pos_embed(self, tensor: Tensor, pos_embed: Tensor | None) -> Tensor:
+        return tensor if pos_embed is None else tensor + pos_embed
+
+    def forward(
+        self,
+        x: Tensor,
+        encoder_out: Tensor,
+        decoder_pos_embed: Tensor | None = None,
+        encoder_pos_embed: Tensor | None = None,
+    ) -> Tensor:
+        """
+        Args:
+            x: (Decoder Sequence, Batch, Channel) tensor of input tokens.
+            encoder_out: (Encoder Sequence, B, C) output features from the last layer of the encoder we are
+                cross-attending with.
+            decoder_pos_embed: (DS, 1, C) positional embedding for the queries (from the decoder).
+            encoder_pos_embed: (ES, 1, C) positional embedding for the keys (from the encoder).
+        Returns:
+            (DS, B, C) tensor of decoder output features.
+        """
+        skip = x
+        if self.pre_norm:
+            x = self.norm1(x)
+        q = k = self.maybe_add_pos_embed(x, decoder_pos_embed)
+        x = self.self_attn(q, k, value=x)[0]  # select just the output, not the attention weights
+        x = skip + self.dropout1(x)
+        if self.pre_norm:
+            skip = x
+            x = self.norm2(x)
+        else:
+            x = self.norm1(x)
+            skip = x
+        x = self.multihead_attn(
+            query=self.maybe_add_pos_embed(x, decoder_pos_embed),
+            key=self.maybe_add_pos_embed(encoder_out, encoder_pos_embed),
+            value=encoder_out,
+        )[0]  # select just the output, not the attention weights
+        x = skip + self.dropout2(x)
+        if self.pre_norm:
+            skip = x
+            x = self.norm3(x)
+        else:
+            x = self.norm2(x)
+            skip = x
+        x = self.linear2(self.dropout(self.activation(self.linear1(x))))
+        x = skip + self.dropout3(x)
+        if not self.pre_norm:
+            x = self.norm3(x)
+        return x
+
+
+def _create_sinusoidal_position_embedding(num_positions: int, dimension: int) -> Tensor:
+    """1D sinusoidal positional embeddings as in Attention is All You Need.
+
+    Args:
+        num_positions: Number of token positions required.
+    Returns: (num_positions, dimension) position embeddings.
+
+    """
+
+    def get_position_angle_vec(position):
+        return [position / np.power(10000, 2 * (hid_j // 2) / dimension) for hid_j in range(dimension)]
+
+    sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(num_positions)])
+    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
+    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
+    return torch.from_numpy(sinusoid_table).float()
+
+
+class _SinusoidalPositionEmbedding2D(nn.Module):
+    """2D sinusoidal positional embeddings similar to what's presented in Attention Is All You Need.
+
+    The variation is that the position indices are normalized in [0, 2π] (not quite: the lower bound is 1/H
+    for the vertical direction, and 1/W for the horizontal direction).
+    """
+
+    def __init__(self, dimension: int):
+        """
+        Args:
+            dimension: The desired dimension of the embeddings.
+        """
+        super().__init__()
+        self.dimension = dimension
+        self._two_pi = 2 * math.pi
+        self._eps = 1e-6
+        # Inverse "common ratio" for the geometric progression in sinusoid frequencies.
+        self._temperature = 10000
+
+    def forward(self, x: Tensor) -> Tensor:
+        """
+        Args:
+            x: A (B, C, H, W) batch of 2D feature maps to generate the embeddings for.
+        Returns:
+            A (1, C, H, W) batch of corresponding sinusoidal positional embeddings.
+        """
+        not_mask = torch.ones_like(x[0, :1])  # (1, H, W)
+        # Note: These are like range(1, H+1) and range(1, W+1) respectively, but in most implementations
+        # they would be range(0, H) and range(0, W). Keeping it as is to match the original code.
+        y_range = not_mask.cumsum(1, dtype=torch.float32)
+        x_range = not_mask.cumsum(2, dtype=torch.float32)
+
+        # "Normalize" the position index such that it ranges in [0, 2π].
+        # Note: Adding epsilon on the denominator should not be needed as all values of y_range and x_range
+        # are non-zero by construction. This is an artifact of the original code.
+        y_range = y_range / (y_range[:, -1:, :] + self._eps) * self._two_pi
+        x_range = x_range / (x_range[:, :, -1:] + self._eps) * self._two_pi
+
+        inverse_frequency = self._temperature ** (
+            2 * (torch.arange(self.dimension, dtype=torch.float32, device=x.device) // 2) / self.dimension
+        )
+
+        x_range = x_range.unsqueeze(-1) / inverse_frequency  # (1, H, W, 1)
+        y_range = y_range.unsqueeze(-1) / inverse_frequency  # (1, H, W, 1)
+
+        # Note: this stack then flatten operation results in interleaved sine and cosine terms.
+        # pos_embed_x and pos_embed_y are (1, H, W, C // 2).
+        pos_embed_x = torch.stack((x_range[..., 0::2].sin(), x_range[..., 1::2].cos()), dim=-1).flatten(3)
+        pos_embed_y = torch.stack((y_range[..., 0::2].sin(), y_range[..., 1::2].cos()), dim=-1).flatten(3)
+        pos_embed = torch.cat((pos_embed_y, pos_embed_x), dim=3).permute(0, 3, 1, 2)  # (1, C, H, W)
+
+        return pos_embed
+
+
+def _get_activation_fn(activation: str) -> Callable:
+    """Return an activation function given a string."""
+    if activation == "relu":
+        return F.relu
+    if activation == "gelu":
+        return F.gelu
+    if activation == "glu":
+        return F.glu
+    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py
index 9077d4d0..d9ba3f07 100644
--- a/lerobot/common/policies/factory.py
+++ b/lerobot/common/policies/factory.py
@@ -1,3 +1,8 @@
+import inspect
+
+from lerobot.common.utils import get_safe_torch_device
+
+
 def make_policy(cfg):
     if cfg.policy.name == "tdmpc":
         from lerobot.common.policies.tdmpc.policy import TDMPCPolicy
@@ -21,10 +26,16 @@ def make_policy(cfg):
             **cfg.policy,
         )
     elif cfg.policy.name == "act":
-        from lerobot.common.policies.act.policy import ActionChunkingTransformerPolicy
+        from lerobot.common.policies.act.configuration_act import ActConfig
+        from lerobot.common.policies.act.modeling_act import ActPolicy

-        policy = ActionChunkingTransformerPolicy(cfg.policy, cfg.device)
-        policy.to(cfg.device)
+        expected_kwargs = set(inspect.signature(ActConfig).parameters)
+        assert set(cfg.policy).issuperset(
+            expected_kwargs
+        ), f"Hydra config is missing arguments: {expected_kwargs.difference(cfg.policy)}"
+        policy_cfg = ActConfig(**{k: v for k, v in cfg.policy.items() if k in expected_kwargs})
+        policy = ActPolicy(policy_cfg)
+        policy.to(get_safe_torch_device(cfg.device))
     else:
         raise ValueError(cfg.policy.name)

diff --git a/lerobot/configs/policy/act.yaml b/lerobot/configs/policy/act.yaml
index e2074b46..8ae3087c 100644
--- a/lerobot/configs/policy/act.yaml
+++ b/lerobot/configs/policy/act.yaml
@@ -8,61 +8,64 @@
 eval_freq: 10000
 save_freq: 100000
 log_freq: 250

-horizon: 100
 n_obs_steps: 1
 # when temporal_agg=False, n_action_steps=horizon
-n_action_steps: ${horizon}

 policy:
   name: act
   pretrained_model_path:

+  # Environment.
+  # Inherit these from the environment.
+  state_dim: ???
+  action_dim: ???
+
+  # Inputs / output structure.
+  n_obs_steps: ${n_obs_steps}
+  camera_names: [top] # [top, front_close, left_pillar, right_pillar]
+  chunk_size: 100 # chunk_size
+  n_action_steps: 100
+
+  # Vision preprocessing.
+  image_normalization_mean: [0.485, 0.456, 0.406]
+  image_normalization_std: [0.229, 0.224, 0.225]
+
+  # Architecture.
+  # Vision backbone.
+  vision_backbone: resnet18
+  use_pretrained_backbone: true
+  replace_final_stride_with_dilation: false
+  # Transformer layers.
+  pre_norm: false
+  d_model: 512
+  n_heads: 8
+  dim_feedforward: 3200
+  feedforward_activation: relu
+  n_encoder_layers: 4
+  n_decoder_layers: 1
+  # VAE.
+  use_vae: true
+  latent_dim: 32
+  n_vae_encoder_layers: 4
+
+  # Inference.
+  use_temporal_aggregation: false
+
+  # Training and loss computation.
+  dropout: 0.1
+  kl_weight: 10
+
+  # ---
+  # TODO(alexander-soare): Remove these from the policy config.
+  batch_size: 8
   lr: 1e-5
   lr_backbone: 1e-5
-  pretrained_backbone: true
   weight_decay: 1e-4
   grad_clip_norm: 10
-  backbone: resnet18
-  horizon: ${horizon} # chunk_size
-  kl_weight: 10
-  d_model: 512
-  dim_feedforward: 3200
-  vae_enc_layers: 4
-  enc_layers: 4
-  dec_layers: 1
-  num_heads: 8
-  #camera_names: [top, front_close, left_pillar, right_pillar]
-  camera_names: [top]
-  dilation: false
-  dropout: 0.1
-  pre_norm: false
-  activation: relu
-  latent_dim: 32
-
-  use_vae: true
-
-  batch_size: 8
-
-  per_alpha: 0.6
-  per_beta: 0.4
-
-  balanced_sampling: false
   utd: 1
-  n_obs_steps: ${n_obs_steps}
-  n_action_steps: ${n_action_steps}
-
-  temporal_agg: false
-
-  state_dim: 14
-  action_dim: 14
-
-  image_normalization:
-    mean: [0.485, 0.456, 0.406]
-    std: [0.229, 0.224, 0.225]
-
   delta_timestamps:
-    observation.images.top: [0.0]
-    observation.state: [0.0]
-    action: "[i / ${fps} for i in range(${horizon})]"
+    observation.images.top: "[i / ${fps} for i in range(1 - ${n_obs_steps}, 1)]"
+    observation.state: "[i / ${fps} for i in range(1 - ${n_obs_steps}, 1)]"
+    action: "[i / ${fps} for i in range(${policy.chunk_size})]"
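
Not part of the patch: a minimal usage sketch of the two new modules, wiring ActConfig into ActPolicy by hand the same way make_policy does from the Hydra config. The 14-dim state/action sizes mirror the values the old act.yaml hard-coded; the 480x640 image resolution and use_pretrained_backbone=False are assumptions made only to keep the sketch self-contained and offline.

# Illustrative sketch only (not part of the diff). Field values follow lerobot/configs/policy/act.yaml;
# state_dim/action_dim=14 come from the keys removed from that file, the image resolution is assumed.
import torch

from lerobot.common.policies.act.configuration_act import ActConfig
from lerobot.common.policies.act.modeling_act import ActPolicy

cfg = ActConfig(
    # Environment.
    state_dim=14,
    action_dim=14,
    # Inputs / output structure.
    n_obs_steps=1,
    camera_names=["top"],
    chunk_size=100,
    n_action_steps=100,
    # Vision preprocessing.
    image_normalization_mean=(0.485, 0.456, 0.406),
    image_normalization_std=(0.229, 0.224, 0.225),
    # Architecture.
    vision_backbone="resnet18",
    use_pretrained_backbone=False,  # assumption: skip the weight download for this sketch
    replace_final_stride_with_dilation=False,
    pre_norm=False,
    d_model=512,
    n_heads=8,
    dim_feedforward=3200,
    feedforward_activation="relu",
    n_encoder_layers=4,
    n_decoder_layers=1,
    use_vae=True,
    latent_dim=32,
    n_vae_encoder_layers=4,
    # Inference.
    use_temporal_aggregation=False,
    # Training and loss computation.
    dropout=0.1,
    kl_weight=10.0,
    # TODO(alexander-soare): these are slated to move out of the policy config.
    batch_size=8,
    lr=1e-5,
    lr_backbone=1e-5,
    weight_decay=1e-4,
    grad_clip_norm=10,
    utd=1,
)

policy = ActPolicy(cfg)
policy.reset()  # (re)initializes the action queue; call this on every environment reset

# One environment step: the policy consumes a (B, ...) observation batch and pops a single
# action from its internal chunk queue, re-running the transformer only when the queue is empty.
batch = {
    "observation.state": torch.zeros(1, cfg.state_dim),
    "observation.images.top": torch.zeros(1, 3, 480, 640),  # assumed camera resolution
}
action = policy.select_action(batch)  # (1, action_dim)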