import logging
import time
from copy import deepcopy
from pathlib import Path

import datasets
import hydra
import torch
from datasets import concatenate_datasets
from datasets.utils import disable_progress_bars, enable_progress_bars

from lerobot.common.datasets.factory import make_dataset
from lerobot.common.datasets.utils import cycle
from lerobot.common.envs.factory import make_env
from lerobot.common.logger import Logger, log_output_dir
from lerobot.common.policies.factory import make_policy
from lerobot.common.policies.policy_protocol import PolicyWithUpdate
from lerobot.common.utils.utils import (
    format_big_number,
    get_safe_torch_device,
    init_logging,
    set_global_seed,
)
from lerobot.scripts.eval import eval_policy


def make_optimizer_and_scheduler(cfg, policy):
    if cfg.policy.name == "act":
        optimizer_params_dicts = [
            {
                "params": [
                    p
                    for n, p in policy.named_parameters()
                    if not n.startswith("backbone") and p.requires_grad
                ]
            },
            {
                "params": [
                    p for n, p in policy.named_parameters() if n.startswith("backbone") and p.requires_grad
                ],
                "lr": cfg.training.lr_backbone,
            },
        ]
        optimizer = torch.optim.AdamW(
            optimizer_params_dicts, lr=cfg.training.lr, weight_decay=cfg.training.weight_decay
        )
        lr_scheduler = None
    elif cfg.policy.name == "diffusion":
        optimizer = torch.optim.Adam(
            policy.diffusion.parameters(),
            cfg.training.lr,
            cfg.training.adam_betas,
            cfg.training.adam_eps,
            cfg.training.adam_weight_decay,
        )
        assert cfg.training.online_steps == 0, "Diffusion Policy does not handle online training."
        from diffusers.optimization import get_scheduler

        lr_scheduler = get_scheduler(
            cfg.training.lr_scheduler,
            optimizer=optimizer,
            num_warmup_steps=cfg.training.lr_warmup_steps,
            num_training_steps=cfg.training.offline_steps,
        )
    elif policy.name == "tdmpc":
        optimizer = torch.optim.Adam(policy.parameters(), cfg.training.lr)
        lr_scheduler = None
    else:
        raise NotImplementedError()

    return optimizer, lr_scheduler

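
# Illustrative aside (not called anywhere): torch optimizers treat the keyword arguments
# passed to the constructor as defaults that each parameter-group dict can override, which
# is the mechanism make_optimizer_and_scheduler relies on to give the ACT backbone its own
# learning rate. A minimal sketch with a hypothetical two-layer model:
#
#     model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Linear(4, 2))
#     optim = torch.optim.AdamW(
#         [
#             {"params": model[0].parameters()},              # falls back to lr=1e-4 below
#             {"params": model[1].parameters(), "lr": 1e-5},  # group-specific lr, like lr_backbone
#         ],
#         lr=1e-4,
#         weight_decay=1e-2,
#     )
#     # optim.param_groups[0]["lr"] == 1e-4 and optim.param_groups[1]["lr"] == 1e-5
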
def update_policy(policy, batch, optimizer, grad_clip_norm, lr_scheduler=None):
    """Returns a dictionary of items for logging."""
    start_time = time.time()
    policy.train()
    output_dict = policy.forward(batch)
    # TODO(rcadene): policy.unnormalize_outputs(out_dict)
    loss = output_dict["loss"]
    loss.backward()
    grad_norm = torch.nn.utils.clip_grad_norm_(
        policy.parameters(),
        grad_clip_norm,
        error_if_nonfinite=False,
    )

    optimizer.step()
    optimizer.zero_grad()

    if lr_scheduler is not None:
        lr_scheduler.step()

    if isinstance(policy, PolicyWithUpdate):
        # To possibly update an internal buffer (for instance an Exponential Moving Average like in TDMPC).
        policy.update()

    info = {
        "loss": loss.item(),
        "grad_norm": float(grad_norm),
        "lr": optimizer.param_groups[0]["lr"],
        "update_s": time.time() - start_time,
        **{k: v for k, v in output_dict.items() if k != "loss"},
    }

    return info

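
# Note on update_policy above: torch.nn.utils.clip_grad_norm_ returns the total gradient
# norm computed *before* clipping, so the "grad_norm" reported in `info` reflects the raw
# gradient magnitude, and with error_if_nonfinite=False a non-finite norm is logged rather
# than raised as an error.
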
@hydra.main(version_base="1.2", config_name="default", config_path="../configs")
def train_cli(cfg: dict):
    train(
        cfg,
        out_dir=hydra.core.hydra_config.HydraConfig.get().run.dir,
        job_name=hydra.core.hydra_config.HydraConfig.get().job.name,
    )

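
# Usage sketch (illustrative only; the exact config group and field names depend on the
# YAML files under ../configs): the Hydra entry point above is typically launched from the
# command line with overrides, e.g.
#
#     python lerobot/scripts/train.py policy=act env=aloha training.offline_steps=100000
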
def train_notebook(out_dir=None, job_name=None, config_name="default", config_path="../configs"):
    from hydra import compose, initialize

    hydra.core.global_hydra.GlobalHydra.instance().clear()
    initialize(config_path=config_path)
    cfg = compose(config_name=config_name)
    train(cfg, out_dir=out_dir, job_name=job_name)

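
# Notebook usage sketch (the module path and argument values here are assumptions, shown
# for illustration): Hydra's compose API does not create a run directory, which is why
# train() requires out_dir and job_name to be passed explicitly.
#
#     from lerobot.scripts.train import train_notebook
#     train_notebook(out_dir="outputs/train/notebook_run", job_name="notebook_run")
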
def log_train_info(logger: Logger, info, step, cfg, dataset, is_offline):
    loss = info["loss"]
    grad_norm = info["grad_norm"]
    lr = info["lr"]
    update_s = info["update_s"]

    # A sample is an (observation, action) pair, where the observation and action
    # can span multiple timestamps. A batch contains `batch_size` such samples.
    num_samples = (step + 1) * cfg.training.batch_size
    avg_samples_per_ep = dataset.num_samples / dataset.num_episodes
    num_episodes = num_samples / avg_samples_per_ep
    num_epochs = num_samples / dataset.num_samples
    log_items = [
        f"step:{format_big_number(step)}",
        # number of samples seen during training
        f"smpl:{format_big_number(num_samples)}",
        # number of episodes seen during training
        f"ep:{format_big_number(num_episodes)}",
        # number of times all unique samples have been seen
        f"epch:{num_epochs:.2f}",
        f"loss:{loss:.3f}",
        f"grdn:{grad_norm:.3f}",
        f"lr:{lr:0.1e}",
        # in seconds
        f"updt_s:{update_s:.3f}",
    ]
    logging.info(" ".join(log_items))

    info["step"] = step
    info["num_samples"] = num_samples
    info["num_episodes"] = num_episodes
    info["num_epochs"] = num_epochs
    info["is_offline"] = is_offline

    logger.log_dict(info, step, mode="train")

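
# Worked example of the counters above (hypothetical numbers, for intuition only): with
# batch_size=64 and a dataset of 10_000 frames spread over 50 episodes (so 200 frames per
# episode on average), after step=999 the logs report
#     num_samples  = 1000 * 64       = 64_000 frames drawn
#     num_episodes = 64_000 / 200    = 320 episode-equivalents seen
#     num_epochs   = 64_000 / 10_000 = 6.4 passes over the unique data
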
def log_eval_info(logger, info, step, cfg, dataset, is_offline):
    eval_s = info["eval_s"]
    avg_sum_reward = info["avg_sum_reward"]
    pc_success = info["pc_success"]

    # A sample is an (observation, action) pair, where the observation and action
    # can span multiple timestamps. A batch contains `batch_size` such samples.
    num_samples = (step + 1) * cfg.training.batch_size
    avg_samples_per_ep = dataset.num_samples / dataset.num_episodes
    num_episodes = num_samples / avg_samples_per_ep
    num_epochs = num_samples / dataset.num_samples
    log_items = [
        f"step:{format_big_number(step)}",
        # number of samples seen during training
        f"smpl:{format_big_number(num_samples)}",
        # number of episodes seen during training
        f"ep:{format_big_number(num_episodes)}",
        # number of times all unique samples have been seen
        f"epch:{num_epochs:.2f}",
        f"∑rwrd:{avg_sum_reward:.3f}",
        f"success:{pc_success:.1f}%",
        f"eval_s:{eval_s:.3f}",
    ]
    logging.info(" ".join(log_items))

    info["step"] = step
    info["num_samples"] = num_samples
    info["num_episodes"] = num_episodes
    info["num_epochs"] = num_epochs
    info["is_offline"] = is_offline

    logger.log_dict(info, step, mode="eval")

|
2024-02-26 09:10:09 +08:00
|
|
|
|
|
|
|
|
2024-04-10 19:34:01 +08:00
|
|
|
def calculate_online_sample_weight(n_off: int, n_on: int, pc_on: float):
    """
    Calculate the sampling weight to assign to online samples so that, on average, a specified
    percentage of each batch comes from the online dataset.

    Parameters:
    - n_off (int): Number of offline samples, each with a sampling weight of 1.
    - n_on (int): Number of online samples.
    - pc_on (float): Desired percentage of online samples in decimal form (e.g., 50% as 0.5).

    The total weight of offline samples is n_off * 1.0.
    The total weight of online samples is n_on * w.
    The total combined weight of all samples is n_off + n_on * w.
    The fraction of the weight that is online is n_on * w / (n_off + n_on * w).
    We want this fraction to equal pc_on, so we solve n_on * w / (n_off + n_on * w) = pc_on.
    The solution is w = -(n_off * pc_on) / (n_on * (pc_on - 1)).
    """
    assert 0.0 <= pc_on <= 1.0
    return -(n_off * pc_on) / (n_on * (pc_on - 1))

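
# Worked example (hypothetical numbers): with n_off=1000 offline frames (weight 1.0 each),
# n_on=100 online frames and pc_on=0.5, the formula gives
#     w = -(1000 * 0.5) / (100 * (0.5 - 1)) = 10.0
# so the online pool carries 100 * 10.0 = 1000 total weight, equal to the offline total,
# i.e. on average half of each batch is drawn from online data. The closed form assumes
# pc_on < 1 (at pc_on == 1 the denominator vanishes).
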
def add_episodes_inplace(
    online_dataset: torch.utils.data.Dataset,
    concat_dataset: torch.utils.data.ConcatDataset,
    sampler: torch.utils.data.WeightedRandomSampler,
    hf_dataset: datasets.Dataset,
    episode_data_index: dict[str, torch.Tensor],
    pc_online_samples: float,
):
    """
    Modifies the online_dataset, concat_dataset, and sampler in place by integrating
    new episodes from hf_dataset into the online_dataset, updating the concatenated
    dataset's structure and adjusting the sampling strategy based on the specified
    percentage of online samples.

    Parameters:
    - online_dataset (torch.utils.data.Dataset): The existing online dataset to be updated.
    - concat_dataset (torch.utils.data.ConcatDataset): The concatenated dataset that combines
      offline and online datasets, used for sampling purposes.
    - sampler (torch.utils.data.WeightedRandomSampler): A sampler that will be updated to
      reflect changes in the dataset sizes and specified sampling weights.
    - hf_dataset (datasets.Dataset): A Hugging Face dataset containing the new episodes to be added.
    - episode_data_index (dict): A dictionary with two keys ("from" and "to") that map to dataset indices.
      They indicate the start index and end index of each episode in the dataset.
    - pc_online_samples (float): The target percentage of samples that should come from
      the online dataset during sampling operations.

    Raises:
    - AssertionError: If the first episode_index or index in hf_dataset is not 0.
    """
    first_episode_idx = hf_dataset.select_columns("episode_index")[0]["episode_index"].item()
    last_episode_idx = hf_dataset.select_columns("episode_index")[-1]["episode_index"].item()
    first_index = hf_dataset.select_columns("index")[0]["index"].item()
    last_index = hf_dataset.select_columns("index")[-1]["index"].item()
    # sanity check
    assert first_episode_idx == 0, f"{first_episode_idx=} is not 0"
    assert first_index == 0, f"{first_index=} is not 0"
    assert first_index == episode_data_index["from"][first_episode_idx].item()
    assert last_index == episode_data_index["to"][last_episode_idx].item() - 1

    if len(online_dataset) == 0:
        # initialize online dataset
        online_dataset.hf_dataset = hf_dataset
        online_dataset.episode_data_index = episode_data_index
    else:
        # get the starting indices of the new episodes and frames to be added
        start_episode_idx = last_episode_idx + 1
        start_index = last_index + 1

        def shift_indices(episode_index, index):
            # note: we don't shift "frame_index" since it represents the index of the frame in the episode it belongs to
            example = {"episode_index": episode_index + start_episode_idx, "index": index + start_index}
            return example

        disable_progress_bars()  # map has a tqdm progress bar
        hf_dataset = hf_dataset.map(shift_indices, input_columns=["episode_index", "index"])
        enable_progress_bars()

        episode_data_index["from"] += start_index
        episode_data_index["to"] += start_index

        # extend online dataset
        online_dataset.hf_dataset = concatenate_datasets([online_dataset.hf_dataset, hf_dataset])

    # update the concatenated dataset length used during sampling
    concat_dataset.cumulative_sizes = concat_dataset.cumsum(concat_dataset.datasets)

    # update the sampling weights for each frame so that online frames get sampled a certain percentage of times
    len_online = len(online_dataset)
    len_offline = len(concat_dataset) - len_online
    weight_offline = 1.0
    weight_online = calculate_online_sample_weight(len_offline, len_online, pc_online_samples)
    sampler.weights = torch.tensor([weight_offline] * len_offline + [weight_online] * len(online_dataset))

    # update the total number of samples used during sampling
    sampler.num_samples = len(concat_dataset)

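
# Illustrative note on the in-place sampler update above: WeightedRandomSampler draws
# indices with probability proportional to its `weights` tensor, so overwriting
# `sampler.weights` and `sampler.num_samples` re-balances future draws without rebuilding
# the DataLoader; the new weights take effect the next time the dataloader is re-iterated
# (as the `cycle` helper does after each pass). A tiny sketch with hypothetical sizes:
#
#     sampler = torch.utils.data.WeightedRandomSampler([1.0] * 4, num_samples=4, replacement=True)
#     sampler.weights = torch.tensor([1.0, 1.0, 10.0, 10.0])
#     # list(sampler) now yields indices 2 and 3 far more often than 0 and 1
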
def train(cfg: dict, out_dir=None, job_name=None):
    if out_dir is None:
        raise NotImplementedError()
    if job_name is None:
        raise NotImplementedError()

    init_logging()

    if cfg.training.online_steps > 0 and cfg.eval.batch_size > 1:
        logging.warning("eval.batch_size > 1 not supported for online training steps")

    # Check device is available
    get_safe_torch_device(cfg.device, log=True)

    torch.backends.cudnn.benchmark = True
    torch.backends.cuda.matmul.allow_tf32 = True
    set_global_seed(cfg.seed)

    logging.info("make_dataset")
    offline_dataset = make_dataset(cfg)

    logging.info("make_env")
    eval_env = make_env(cfg)

    logging.info("make_policy")
    policy = make_policy(hydra_cfg=cfg, dataset_stats=offline_dataset.stats)

    # Create optimizer and scheduler
    # Temporary hack to move optimizer out of policy
    optimizer, lr_scheduler = make_optimizer_and_scheduler(cfg, policy)

    num_learnable_params = sum(p.numel() for p in policy.parameters() if p.requires_grad)
    num_total_params = sum(p.numel() for p in policy.parameters())

    # log metrics to terminal and wandb
    logger = Logger(out_dir, job_name, cfg)

    log_output_dir(out_dir)
    logging.info(f"{cfg.env.task=}")
    logging.info(f"{cfg.training.offline_steps=} ({format_big_number(cfg.training.offline_steps)})")
    logging.info(f"{cfg.training.online_steps=}")
    logging.info(f"{offline_dataset.num_samples=} ({format_big_number(offline_dataset.num_samples)})")
    logging.info(f"{offline_dataset.num_episodes=}")
    logging.info(f"{num_learnable_params=} ({format_big_number(num_learnable_params)})")
    logging.info(f"{num_total_params=} ({format_big_number(num_total_params)})")

    # Note: this helper will be used in offline and online training loops.
    def evaluate_and_checkpoint_if_needed(step):
        if step % cfg.training.eval_freq == 0:
            logging.info(f"Eval policy at step {step}")
            eval_info = eval_policy(
                eval_env,
                policy,
                cfg.eval.n_episodes,
                video_dir=Path(out_dir) / "eval",
                max_episodes_rendered=4,
                start_seed=cfg.seed,
            )
            log_eval_info(logger, eval_info["aggregated"], step, cfg, offline_dataset, is_offline)
            if cfg.wandb.enable:
                logger.log_video(eval_info["video_paths"][0], step, mode="eval")
            logging.info("Resume training")

        if cfg.training.save_model and step % cfg.training.save_freq == 0:
            logging.info(f"Checkpoint policy after step {step}")
            # Note: Save with step as the identifier, and format it to have at least 6 digits but more if
            # needed (choose 6 as a minimum for consistency without being overkill).
            logger.save_model(
                policy,
                identifier=str(step).zfill(
                    max(6, len(str(cfg.training.offline_steps + cfg.training.online_steps)))
                ),
            )
            logging.info("Resume training")

    # create dataloader for offline training
    dataloader = torch.utils.data.DataLoader(
        offline_dataset,
        num_workers=4,
        batch_size=cfg.training.batch_size,
        shuffle=True,
        pin_memory=cfg.device != "cpu",
        drop_last=False,
    )
    dl_iter = cycle(dataloader)
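
    # Descriptive note: `cycle` (from lerobot.common.datasets.utils) keeps re-iterating the
    # dataloader, so `next(dl_iter)` below never raises StopIteration; with shuffle=True a
    # fresh permutation is drawn each time the underlying iterator is rebuilt.
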
    policy.train()
    step = 0  # number of policy updates (forward + backward + optim)
    is_offline = True
    for offline_step in range(cfg.training.offline_steps):
        if offline_step == 0:
            logging.info("Start offline training on a fixed dataset")
        batch = next(dl_iter)

        for key in batch:
            batch[key] = batch[key].to(cfg.device, non_blocking=True)

        train_info = update_policy(policy, batch, optimizer, cfg.training.grad_clip_norm, lr_scheduler)

        # TODO(rcadene): is it ok if step_t=0 = 0 and not 1 as previously done?
        if step % cfg.training.log_freq == 0:
            log_train_info(logger, train_info, step, cfg, offline_dataset, is_offline)

        # Note: evaluate_and_checkpoint_if_needed happens **after** the `step`th training update has completed,
        # so we pass in step + 1.
        evaluate_and_checkpoint_if_needed(step + 1)

        step += 1

    # create an env dedicated to online episodes collection from policy rollout
    online_training_env = make_env(cfg, n_envs=1)

    # create an empty online dataset similar to offline dataset
    online_dataset = deepcopy(offline_dataset)
    online_dataset.hf_dataset = {}
    online_dataset.episode_data_index = {}

    # create dataloader for online training
    concat_dataset = torch.utils.data.ConcatDataset([offline_dataset, online_dataset])
    weights = [1.0] * len(concat_dataset)
    sampler = torch.utils.data.WeightedRandomSampler(
        weights, num_samples=len(concat_dataset), replacement=True
    )
    dataloader = torch.utils.data.DataLoader(
        concat_dataset,
        num_workers=4,
        batch_size=cfg.training.batch_size,
        sampler=sampler,
        pin_memory=cfg.device != "cpu",
        drop_last=False,
    )
    dl_iter = cycle(dataloader)

    online_step = 0
    is_offline = False
    for env_step in range(cfg.training.online_steps):
        if env_step == 0:
            logging.info("Start online training by interacting with environment")

        policy.eval()
        with torch.no_grad():
            eval_info = eval_policy(
                online_training_env,
                policy,
                n_episodes=1,
                return_episode_data=True,
                start_seed=cfg.training.online_env_seed,
                enable_progbar=True,
            )

        add_episodes_inplace(
            online_dataset,
            concat_dataset,
            sampler,
            hf_dataset=eval_info["episodes"]["hf_dataset"],
            episode_data_index=eval_info["episodes"]["episode_data_index"],
            pc_online_samples=cfg.training.online_sampling_ratio,
        )

        policy.train()
        for _ in range(cfg.training.online_steps_between_rollouts):
            batch = next(dl_iter)

            for key in batch:
                batch[key] = batch[key].to(cfg.device, non_blocking=True)

            train_info = update_policy(policy, batch, optimizer, cfg.training.grad_clip_norm, lr_scheduler)

            if step % cfg.training.log_freq == 0:
                log_train_info(logger, train_info, step, cfg, online_dataset, is_offline)

            # Note: evaluate_and_checkpoint_if_needed happens **after** the `step`th training update has completed,
            # so we pass in step + 1.
            evaluate_and_checkpoint_if_needed(step + 1)

            step += 1
            online_step += 1

    eval_env.close()
    online_training_env.close()
    logging.info("End of training")

if __name__ == "__main__":
    train_cli()