backup wip

Alexander Soare 2024-05-21 12:58:05 +01:00
parent 77b61e364e
commit 607bea1cb3
9 changed files with 199 additions and 87 deletions

Makefile

@@ -19,6 +19,7 @@ build-gpu:
 test-end-to-end:
 	${MAKE} test-act-ete-train
+	${MAKE} test-act-ete-train-resume
 	${MAKE} test-act-ete-eval
 	${MAKE} test-act-ete-train-amp
 	${MAKE} test-act-ete-eval-amp
@@ -46,9 +47,16 @@ test-act-ete-train:
 		training.batch_size=2 \
 		hydra.run.dir=tests/outputs/act/
 
+test-act-ete-train-resume:
+	python lerobot/scripts/train.py \
+		hydra.run.dir=tests/outputs/act/ \
+		training.offline_steps=4 \
+		resume=true
+
 test-act-ete-eval:
 	python lerobot/scripts/eval.py \
-		-p tests/outputs/act/checkpoints/000002 \
+		-p tests/outputs/act/checkpoints/000002/pretrained_model \
 		eval.n_episodes=1 \
 		eval.batch_size=1 \
 		env.episode_length=8 \
@@ -75,7 +83,7 @@ test-act-ete-train-amp:
 test-act-ete-eval-amp:
 	python lerobot/scripts/eval.py \
-		-p tests/outputs/act/checkpoints/000002 \
+		-p tests/outputs/act/checkpoints/000002/pretrained_model \
 		eval.n_episodes=1 \
 		eval.batch_size=1 \
 		env.episode_length=8 \
@@ -102,7 +110,7 @@ test-diffusion-ete-train:
 test-diffusion-ete-eval:
 	python lerobot/scripts/eval.py \
-		-p tests/outputs/diffusion/checkpoints/000002 \
+		-p tests/outputs/diffusion/checkpoints/000002/pretrained_model \
 		eval.n_episodes=1 \
 		eval.batch_size=1 \
 		env.episode_length=8 \
@@ -129,7 +137,7 @@ test-tdmpc-ete-train:
 test-tdmpc-ete-eval:
	python lerobot/scripts/eval.py \
-		-p tests/outputs/tdmpc/checkpoints/000002 \
+		-p tests/outputs/tdmpc/checkpoints/000002/pretrained_model \
 		eval.n_episodes=1 \
 		eval.batch_size=1 \
 		env.episode_length=8 \
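The new resume target assumes a prior run has already written a checkpoint under `tests/outputs/act/`, which `test-end-to-end` guarantees by ordering the targets. A sketch of the intended sequence:

```bash
make test-act-ete-train         # writes tests/outputs/act/checkpoints/...
make test-act-ete-train-resume  # resumes via tests/outputs/act/checkpoints/last (per the logger changes below)
```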

README.md

@@ -149,9 +149,9 @@ python lerobot/scripts/eval.py \
 ```
 
 Note: After training your own policy, you can re-evaluate the checkpoints with:
 ```bash
-python lerobot/scripts/eval.py \
-    -p PATH/TO/TRAIN/OUTPUT/FOLDER
+python lerobot/scripts/eval.py -p {OUTPUT_DIR}/checkpoints/last/pretrained_model
 ```
 
 See `python lerobot/scripts/eval.py --help` for more instructions.
@@ -174,6 +174,19 @@ The experiment directory is automatically generated and will show up in yellow i
 hydra.run.dir=your/new/experiment/dir
 ```
+
+In the experiment directory there will be a folder called `checkpoints` which will have the following structure:
+
+```bash
+checkpoints
+├── 000250                    # checkpoint_dir for training step 250
+│   ├── pretrained_model      # Hugging Face pretrained model dir
+│   │   ├── config.json       # Hugging Face pretrained model config
+│   │   ├── config.yaml       # consolidated Hydra config
+│   │   ├── model.safetensors # model weights
+│   │   └── README.md         # Hugging Face model card
+│   └── training_state.pth    # optimizer/scheduler/rng state and training step
+```
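Any specific checkpoint in this tree can be re-evaluated the same way the `last` symlink is used above, by pointing `-p` at its `pretrained_model` directory; a sketch, assuming the step-250 checkpoint from the tree:

```bash
python lerobot/scripts/eval.py -p {OUTPUT_DIR}/checkpoints/000250/pretrained_model
```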
 To use wandb for logging training and evaluation curves, make sure you've run `wandb login` as a one-time setup step. Then, when running the training command above, enable WandB in the configuration by adding:
 
 ```bash

lerobot/common/datasets/factory.py

@@ -21,6 +21,19 @@ from omegaconf import OmegaConf
 from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
 
 
+def resolve_delta_timestamps(cfg):
+    """Resolves delta_timestamps config key (in-place) by using `eval`.
+
+    Doesn't do anything if delta_timestamps is not specified or has already been resolved (as evidenced by
+    the data type of its values).
+    """
+    delta_timestamps = cfg.training.get("delta_timestamps")
+    if delta_timestamps is not None:
+        for key in delta_timestamps:
+            if isinstance(delta_timestamps[key], str):
+                cfg.training.delta_timestamps[key] = eval(delta_timestamps[key])
+
+
 def make_dataset(
     cfg,
     split="train",
@@ -31,18 +44,14 @@ def make_dataset(
             f"environment ({cfg.env.name=})."
         )
 
-    delta_timestamps = cfg.training.get("delta_timestamps")
-    if delta_timestamps is not None:
-        for key in delta_timestamps:
-            if isinstance(delta_timestamps[key], str):
-                delta_timestamps[key] = eval(delta_timestamps[key])
+    resolve_delta_timestamps(cfg)
 
     # TODO(rcadene): add data augmentations
 
     dataset = LeRobotDataset(
         cfg.dataset_repo_id,
         split=split,
-        delta_timestamps=delta_timestamps,
+        delta_timestamps=cfg.training.get("delta_timestamps"),
     )
 
     if cfg.get("override_dataset_stats"):
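To illustrate what the new helper does: string entries in `training.delta_timestamps` are Python expressions that get materialized in place via `eval`, while already-resolved entries are left alone. A minimal sketch with a made-up config (the key names and expression are illustrative, not from this commit):

```python
from omegaconf import OmegaConf

from lerobot.common.datasets.factory import resolve_delta_timestamps

# Hypothetical config: one entry is a string expression, one is already a list.
cfg = OmegaConf.create(
    {
        "training": {
            "delta_timestamps": {
                "observation.image": "[i / 10 for i in range(3)]",  # assumed expression
                "action": [0.0, 0.1],  # already resolved; left untouched
            }
        }
    }
)

resolve_delta_timestamps(cfg)  # resolves in place
print(OmegaConf.to_container(cfg.training.delta_timestamps, resolve=True))
# {'observation.image': [0.0, 0.1, 0.2], 'action': [0.0, 0.1]}
```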

lerobot/common/logger.py

@@ -26,7 +26,7 @@ from pathlib import Path
 
 import torch
 from huggingface_hub.constants import SAFETENSORS_SINGLE_FILE
-from omegaconf import OmegaConf
+from omegaconf import DictConfig, OmegaConf
 from termcolor import colored
 from torch.optim import Optimizer
 from torch.optim.lr_scheduler import LRScheduler
@@ -35,7 +35,11 @@ from lerobot.common.policies.policy_protocol import Policy
 from lerobot.common.utils.utils import get_global_random_state, set_global_random_state
 
-def cfg_to_group(cfg, return_list=False):
+def log_output_dir(out_dir):
+    logging.info(colored("Output dir:", "yellow", attrs=["bold"]) + f" {out_dir}")
+
+
+def cfg_to_group(cfg: DictConfig, return_list: bool = False) -> list[str] | str:
     """Return a group name for logging. Optionally returns group name as list."""
     lst = [
         f"policy:{cfg.policy.name}",
@@ -46,21 +50,34 @@ def cfg_to_group(cfg, return_list=False):
     return lst if return_list else "-".join(lst)
 
 
+def get_wandb_run_id_from_filesystem(checkpoint_dir: Path) -> str:
+    # Get the WandB run ID.
+    paths = glob(str(checkpoint_dir / "../wandb/latest-run/run-*"))
+    if len(paths) != 1:
+        raise RuntimeError("Couldn't get the previous WandB run ID for run resumption.")
+    match = re.search(r"run-([^\.]+).wandb", paths[0].split("/")[-1])
+    if match is None:
+        raise RuntimeError("Couldn't get the previous WandB run ID for run resumption.")
+    wandb_run_id = match.groups(0)[0]
+    return wandb_run_id
+
+
 class Logger:
     """Primary logger object. Logs either locally or using wandb."""
 
-    def __init__(self, log_dir, job_name, cfg):
-        self._log_dir = Path(log_dir)
-        self._log_dir.mkdir(parents=True, exist_ok=True)
-        self._job_name = job_name
-        self._checkpoint_dir = self._log_dir / "checkpoints"
-        self._last_checkpoint_path = self._checkpoint_dir / "last"
-        self._disable_wandb_artifact = cfg.wandb.disable_artifact
-        self._group = cfg_to_group(cfg)
-        self._seed = cfg.seed
+    pretrained_model_dir_name = "pretrained_model"
+    training_state_file_name = "training_state.pth"
+
+    def __init__(self, log_dir: str, job_name: str, cfg: DictConfig):
         self._cfg = cfg
+        self.log_dir = Path(log_dir)
+        self.log_dir.mkdir(parents=True, exist_ok=True)
+        self.checkpoints_dir = self.log_dir / "checkpoints"
+        self.last_checkpoint_dir = self.checkpoints_dir / "last"
+        self.last_pretrained_model_dir = self.last_checkpoint_dir / self.pretrained_model_dir_name
+
         # Set up WandB.
+        self._group = cfg_to_group(cfg)
         project = cfg.get("wandb", {}).get("project")
         entity = cfg.get("wandb", {}).get("entity")
         enable_wandb = cfg.get("wandb", {}).get("enable", False)
@@ -74,14 +91,7 @@ class Logger:
             wandb_run_id = None
             if cfg.resume:
-                # Get the WandB run ID.
-                paths = glob(str(self._checkpoint_dir / "../wandb/latest-run/run-*"))
-                if len(paths) != 1:
-                    raise RuntimeError("Couldn't get the previous WandB run ID for run resumption.")
-                match = re.search(r"run-([^\.]+).wandb", paths[0].split("/")[-1])
-                if match is None:
-                    raise RuntimeError("Couldn't get the previous WandB run ID for run resumption.")
-                wandb_run_id = match.groups(0)[0]
+                wandb_run_id = get_wandb_run_id_from_filesystem(self.checkpoints_dir)
 
             wandb.init(
                 id=wandb_run_id,
@@ -89,46 +99,49 @@ class Logger:
                 entity=entity,
                 name=job_name,
                 notes=cfg.get("wandb", {}).get("notes"),
-                # group=self._group,
                 tags=cfg_to_group(cfg, return_list=True),
-                dir=self._log_dir,
+                dir=log_dir,
                 config=OmegaConf.to_container(cfg, resolve=True),
                 # TODO(rcadene): try set to True
                 save_code=False,
                 # TODO(rcadene): split train and eval, and run async eval with job_type="eval"
                 job_type="train_eval",
-                # TODO(rcadene): add resume option
                 resume="must" if cfg.resume else None,
             )
             print(colored("Logs will be synced with wandb.", "blue", attrs=["bold"]))
             logging.info(f"Track this run --> {colored(wandb.run.get_url(), 'yellow', attrs=['bold'])}")
             self._wandb = wandb
-    @property
-    def last_checkpoint_path(self) -> Path:
-        return self._last_checkpoint_path
-
-    def save_model(self, policy: Policy, identifier: str):
-        self._checkpoint_dir.mkdir(parents=True, exist_ok=True)
-        save_dir = self._checkpoint_dir / str(identifier)
+    def save_model(self, save_dir: Path, policy: Policy, wandb_artifact_name: str | None = None):
+        """Save the weights of the Policy model using PyTorchModelHubMixin.
+
+        The weights are saved in a folder called "pretrained_model" under the checkpoint directory.
+
+        Optionally also upload the model to WandB.
+        """
+        self.checkpoints_dir.mkdir(parents=True, exist_ok=True)
         policy.save_pretrained(save_dir)
         # Also save the full Hydra config for the env configuration.
         OmegaConf.save(self._cfg, save_dir / "config.yaml")
-        if self._wandb and not self._disable_wandb_artifact:
+        if self._wandb and not self._cfg.wandb.disable_artifact:
             # note wandb artifact does not accept ":" or "/" in its name
-            artifact = self._wandb.Artifact(
-                f"{self._group.replace(':', '_').replace('/', '_')}-{self._seed}-{identifier}",
-                type="model",
-            )
+            artifact = self._wandb.Artifact(wandb_artifact_name, type="model")
             artifact.add_file(save_dir / SAFETENSORS_SINGLE_FILE)
             self._wandb.log_artifact(artifact)
-        if self._last_checkpoint_path.exists():
-            os.remove(self._last_checkpoint_path)
-        os.symlink(save_dir.absolute(), self._last_checkpoint_path)
+        if self.last_checkpoint_dir.exists():
+            os.remove(self.last_checkpoint_dir)
+            # TODO(now): Check this works
     def save_training_state(
-        self, train_step: int, optimizer: Optimizer, scheduler: LRScheduler | None, identifier: str
+        self,
+        save_dir: Path,
+        train_step: int,
+        optimizer: Optimizer,
+        scheduler: LRScheduler | None,
     ):
+        """Checkpoint the global training_step, optimizer state, scheduler state, and random state.
+
+        All of these are saved as "training_state.pth" under the checkpoint directory.
+        """
         training_state = {
             "step": train_step,
             "optimizer": optimizer.state_dict(),
@@ -136,14 +149,35 @@ class Logger:
         }
         if scheduler is not None:
             training_state["scheduler"] = scheduler.state_dict()
-        torch.save(training_state, self._checkpoint_dir / str(identifier) / "training_state.pth")
+        torch.save(training_state, save_dir / self.training_state_file_name)
+
+    def save_checkpont(
+        self,
+        train_step: int,
+        policy: Policy,
+        optimizer: Optimizer,
+        scheduler: LRScheduler | None,
+        identifier: str,
+    ):
+        """Checkpoint the model weights and the training state."""
+        checkpoint_dir = self.checkpoints_dir / str(identifier)
+        wandb_artifact_name = (
+            None
+            if self._wandb is None
+            else f"{self._group.replace(':', '_').replace('/', '_')}-{self._cfg.seed}-{identifier}"
+        )
+        self.save_model(
+            checkpoint_dir / self.pretrained_model_dir_name, policy, wandb_artifact_name=wandb_artifact_name
+        )
+        self.save_training_state(checkpoint_dir, train_step, optimizer, scheduler)
+        os.symlink(checkpoint_dir.absolute(), self.last_checkpoint_dir)
     def load_last_training_state(self, optimizer: Optimizer, scheduler: LRScheduler | None) -> int:
         """
-        Load the optimizer and scheduler state_dict from the last checkpoint, set the random state, and return
-        the global training step.
+        Given the last checkpoint in the logging directory, load the optimizer state, scheduler state, and
+        random state, and return the global training step.
         """
-        training_state = torch.load(self._checkpoint_dir / "last" / "training_state.pth")
+        training_state = torch.load(self.last_checkpoint_dir / self.training_state_file_name)
         optimizer.load_state_dict(training_state["optimizer"])
         if scheduler is not None:
             scheduler.load_state_dict(training_state["scheduler"])
@@ -155,19 +189,9 @@ class Logger:
         set_global_random_state({k: training_state[k] for k in get_global_random_state()})
         return training_state["step"]
 
-    def save_checkpont(
-        self,
-        train_step: int,
-        policy: Policy,
-        optimizer: Optimizer,
-        scheduler: LRScheduler | None,
-        identifier: str,
-    ):
-        self.save_model(policy, identifier)
-        self.save_training_state(train_step, optimizer, scheduler, identifier)
-
     def log_dict(self, d, step, mode="train"):
         assert mode in {"train", "eval"}
+        # TODO(alexander-soare): Add local text log.
         if self._wandb is not None:
             for k, v in d.items():
                 if not isinstance(v, (int, float, str)):
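Taken together, the reworked API splits saving into `save_model` / `save_training_state` and wraps both in `save_checkpont` (method name as spelled in this commit), which also maintains the `last` symlink. A minimal usage sketch, assuming a training loop with `cfg`, `policy`, `optimizer`, and `lr_scheduler` already in scope; the zero-padded identifier is an assumption based on the `000002`/`000250` directory names above:

```python
logger = Logger(log_dir="outputs/train/example", job_name="example", cfg=cfg)

logger.save_checkpont(
    train_step=step,
    policy=policy,
    optimizer=optimizer,
    scheduler=lr_scheduler,
    identifier=f"{step:06d}",  # e.g. "000250"; padding scheme assumed
)
# Resulting layout (matches the README tree above):
#   outputs/train/example/checkpoints/000250/pretrained_model/...
#   outputs/train/example/checkpoints/000250/training_state.pth
#   outputs/train/example/checkpoints/last -> 000250
```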

lerobot/configs/default.yaml

@@ -13,10 +13,8 @@ hydra:
 # Set `resume` to true to resume a previous run. In order for this to work, you will need to make sure
 # `hydra.run.dir` is the directory of an existing run with at least one checkpoint in it.
-# Note that run resumption works by grabbing the configuration file from
-# {hydra.run.dir}/checkpoints/{specific_checkpoint_dir}/config.yaml. Any differences between the provided
-# configuration and the prior configuration (apart from the resume parameter itself) are ignored. If you wish
-# to change something, you can consider modifying the configuration in the file directly.
+# Note that when resuming a run, the provided configuration takes precedence over the checkpoint
+# configuration.
 resume: false
 device: cuda  # cpu
 # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP,
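In practice, and consistent with the new `test-act-ete-train-resume` Makefile target above, resuming amounts to re-running the train script against an existing run directory (the path below is a placeholder):

```bash
python lerobot/scripts/train.py \
    hydra.run.dir=path/to/existing/run \
    resume=true
```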

lerobot/scripts/eval.py

@@ -28,7 +28,7 @@ OR, you want to evaluate a model checkpoint from the LeRobot training script for
 ```
 python lerobot/scripts/eval.py \
-    -p outputs/train/diffusion_pusht/checkpoints/005000 \
+    -p outputs/train/diffusion_pusht/checkpoints/005000/pretrained_model \
     eval.n_episodes=10
 ```

lerobot/scripts/train.py

@@ -18,13 +18,16 @@ import time
 from contextlib import nullcontext
 from copy import deepcopy
 from pathlib import Path
+from pprint import pformat
 
 import hydra
 import torch
+from deepdiff import DeepDiff
-from omegaconf import DictConfig
+from omegaconf import DictConfig, OmegaConf
+from termcolor import colored
 from torch.cuda.amp import GradScaler
 
-from lerobot.common.datasets.factory import make_dataset
+from lerobot.common.datasets.factory import make_dataset, resolve_delta_timestamps
 from lerobot.common.datasets.utils import cycle
 from lerobot.common.envs.factory import make_env
 from lerobot.common.logger import Logger, log_output_dir
@@ -223,6 +226,42 @@ def train(cfg: DictConfig, out_dir: str | None = None, job_name: str | None = No
     # log metrics to terminal and wandb
     logger = Logger(out_dir, job_name, cfg)
 
+    # If we are resuming a run, we need to check that a checkpoint exists in the log directory, and we need
+    # to check for any differences between the provided config and the checkpoint's config.
+    if cfg.resume:
+        if not logger.last_checkpoint_dir.exists():
+            raise RuntimeError(
+                f"You have set resume=True, but there is no model checkpoint in {logger.last_checkpoint_dir}."
+            )
+        else:
+            checkpoint_cfg_path = str(logger.last_pretrained_model_dir / "config.yaml")
+            logging.info(
+                colored(
+                    "You have set resume=True, indicating that you wish to resume a run. The provided config "
+                    f"is being overridden by {checkpoint_cfg_path}",
+                    color="yellow",
+                    attrs=["bold"],
+                )
+            )
+            # Get the configuration file from the last checkpoint.
+            checkpoint_cfg = init_hydra_config(checkpoint_cfg_path)
+            # Hack to resolve the delta_timestamps ahead of time in order to properly diff.
+            resolve_delta_timestamps(cfg)
+            diff = DeepDiff(OmegaConf.to_container(checkpoint_cfg), OmegaConf.to_container(cfg))
+            if len(diff) > 0:
+                # Log a warning about differences between the checkpoint configuration and the provided
+                # configuration (but ignore the `resume` parameter).
+                if "values_changed" in diff and "root['resume']" in diff["values_changed"]:
+                    del diff["values_changed"]["root['resume']"]
+                logging.warning(
+                    colored(
+                        "At least one difference was detected between the checkpoint configuration and the "
+                        f"provided configuration: \n{pformat(diff)}\nNote that the provided configuration "
+                        "takes precedence.",
+                        color="yellow",
+                    )
+                )
     if cfg.training.online_steps > 0:
         raise NotImplementedError("Online training is not implemented yet.")
@@ -244,7 +283,7 @@ def train(cfg: DictConfig, out_dir: str | None = None, job_name: str | None = No
     policy = make_policy(
         hydra_cfg=cfg,
         dataset_stats=offline_dataset.stats if not cfg.resume else None,
-        pretrained_policy_name_or_path=str(logger.last_checkpoint_path) if cfg.resume else None,
+        pretrained_policy_name_or_path=str(logger.last_pretrained_model_dir) if cfg.resume else None,
     )
 
     # Create optimizer and scheduler
@@ -255,16 +294,6 @@ def train(cfg: DictConfig, out_dir: str | None = None, job_name: str | None = No
     step = 0  # number of policy updates (forward + backward + optim)
 
     if cfg.resume:
-        print("You have set resume=True, indicating that you wish to resume a run.")
-        # Make sure there is a checkpoint.
-        if not logger.last_checkpoint_path.exists():
-            raise RuntimeError(
-                f"You have set resume=True, but {str(logger.last_checkpoint_path)} does not exist."
-            )
-        # Get the configuration file from the last checkpoint.
-        checkpoint_cfg = init_hydra_config(str(logger.last_checkpoint_path / "config.yaml"))
-        # TODO(now): Do a diff check.
-        cfg = checkpoint_cfg
         step = logger.load_last_training_state(optimizer, lr_scheduler)
 
     num_learnable_params = sum(p.numel() for p in policy.parameters() if p.requires_grad)
@@ -343,7 +372,6 @@ def train(cfg: DictConfig, out_dir: str | None = None, job_name: str | None = No
                 use_amp=cfg.use_amp,
             )
 
-        # TODO(rcadene): is it ok if step_t=0 = 0 and not 1 as previously done?
         if step % cfg.training.log_freq == 0:
             log_train_info(logger, train_info, step, cfg, offline_dataset, is_offline)
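For reference, the config check above leans on `deepdiff` (newly added to the dependencies below). A minimal sketch of the pattern, with made-up dicts standing in for the checkpoint and provided Hydra configs:

```python
from pprint import pformat

from deepdiff import DeepDiff

# Stand-ins for OmegaConf.to_container(checkpoint_cfg) and OmegaConf.to_container(cfg).
checkpoint_cfg = {"resume": False, "training": {"offline_steps": 2}}
provided_cfg = {"resume": True, "training": {"offline_steps": 4}}

diff = DeepDiff(checkpoint_cfg, provided_cfg)
# `resume` is expected to differ between the two, so it is dropped before warning.
if "values_changed" in diff and "root['resume']" in diff["values_changed"]:
    del diff["values_changed"]["root['resume']"]
print(pformat(diff))
# {'values_changed': {"root['training']['offline_steps']": {'new_value': 4,
#                                                           'old_value': 2}}}
```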

poetry.lock (generated)

@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
 
 [[package]]
 name = "absl-py"
@@ -595,6 +595,24 @@ files = [
     {file = "decorator-4.4.2.tar.gz", hash = "sha256:e3a62f0520172440ca0dcc823749319382e377f37f140a0b99ef45fecb84bfe7"},
 ]
 
+[[package]]
+name = "deepdiff"
+version = "7.0.1"
+description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other."
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "deepdiff-7.0.1-py3-none-any.whl", hash = "sha256:447760081918216aa4fd4ca78a4b6a848b81307b2ea94c810255334b759e1dc3"},
+    {file = "deepdiff-7.0.1.tar.gz", hash = "sha256:260c16f052d4badbf60351b4f77e8390bee03a0b516246f6839bc813fb429ddf"},
+]
+
+[package.dependencies]
+ordered-set = ">=4.1.0,<4.2.0"
+
+[package.extras]
+cli = ["click (==8.1.7)", "pyyaml (==6.0.1)"]
+optimize = ["orjson"]
+
 [[package]]
 name = "diffusers"
 version = "0.27.2"
@@ -1673,6 +1691,7 @@ files = [
     {file = "lxml-5.2.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:9e2addd2d1866fe112bc6f80117bcc6bc25191c5ed1bfbcf9f1386a884252ae8"},
     {file = "lxml-5.2.1-cp37-cp37m-win32.whl", hash = "sha256:f51969bac61441fd31f028d7b3b45962f3ecebf691a510495e5d2cd8c8092dbd"},
     {file = "lxml-5.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:b0b58fbfa1bf7367dde8a557994e3b1637294be6cf2169810375caf8571a085c"},
+    {file = "lxml-5.2.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3e183c6e3298a2ed5af9d7a356ea823bccaab4ec2349dc9ed83999fd289d14d5"},
     {file = "lxml-5.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:804f74efe22b6a227306dd890eecc4f8c59ff25ca35f1f14e7482bbce96ef10b"},
     {file = "lxml-5.2.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:08802f0c56ed150cc6885ae0788a321b73505d2263ee56dad84d200cab11c07a"},
     {file = "lxml-5.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f8c09ed18ecb4ebf23e02b8e7a22a05d6411911e6fabef3a36e4f371f4f2585"},
@@ -2367,6 +2386,20 @@ numpy = [
     {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""},
 ]
 
+[[package]]
+name = "ordered-set"
+version = "4.1.0"
+description = "An OrderedSet is a custom MutableSet that remembers its order, so that every"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "ordered-set-4.1.0.tar.gz", hash = "sha256:694a8e44c87657c59292ede72891eb91d34131f6531463aab3009191c77364a8"},
+    {file = "ordered_set-4.1.0-py3-none-any.whl", hash = "sha256:046e1132c71fcf3330438a539928932caf51ddbc582496833e23de611de14562"},
+]
+
+[package.extras]
+dev = ["black", "mypy", "pytest"]
+
 [[package]]
 name = "packaging"
 version = "24.0"
@@ -2386,7 +2419,6 @@ optional = false
 python-versions = ">=3.9"
 files = [
     {file = "pandas-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce"},
-    {file = "pandas-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7adfc142dac335d8c1e0dcbd37eb8617eac386596eb9e1a1b77791cf2498238"},
     {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4abfe0be0d7221be4f12552995e58723c7422c80a659da13ca382697de830c08"},
     {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8635c16bf3d99040fdf3ca3db669a7250ddf49c55dc4aa8fe0ae0fa8d6dcc1f0"},
     {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:40ae1dffb3967a52203105a077415a86044a2bea011b5f321c6aa64b379a3f51"},
@@ -2407,7 +2439,6 @@ files = [
     {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:43498c0bdb43d55cb162cdc8c06fac328ccb5d2eabe3cadeb3529ae6f0517c32"},
     {file = "pandas-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:d187d355ecec3629624fccb01d104da7d7f391db0311145817525281e2804d23"},
     {file = "pandas-2.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0ca6377b8fca51815f382bd0b697a0814c8bda55115678cbc94c30aacbb6eff2"},
-    {file = "pandas-2.2.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9057e6aa78a584bc93a13f0a9bf7e753a5e9770a30b4d758b8d5f2a62a9433cd"},
     {file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:001910ad31abc7bf06f49dcc903755d2f7f3a9186c0c040b827e522e9cef0863"},
     {file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66b479b0bd07204e37583c191535505410daa8df638fd8e75ae1b383851fe921"},
     {file = "pandas-2.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a77e9d1c386196879aa5eb712e77461aaee433e54c68cf253053a73b7e49c33a"},
@@ -4248,4 +4279,4 @@ xarm = ["gym-xarm"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.10,<3.13"
-content-hash = "e4834d67df32c8c617c259b0e59bb33ddaccde08fe940d771e74046cbffe3399"
+content-hash = "d3b6f4bf0106b043aed7ad0c65e236d0409b96dff1dfdf44c750ef19b0cb8772"

pyproject.toml

@@ -58,6 +58,7 @@ imagecodecs = { version = ">=2024.1.1", optional = true }
 pyav = ">=12.0.5"
 moviepy = ">=1.0.3"
 rerun-sdk = ">=0.15.1"
+deepdiff = ">=7.0.1"
 
 [tool.poetry.extras]