Convert datasets to av1 encoding (#302)

Simon Alibert 2024-07-22 20:08:59 +02:00 committed by GitHub
parent 461d5472d3
commit 0b21210d72
571 changed files with 988 additions and 1311 deletions

View File

@ -180,8 +180,10 @@ dataset attributes:
│ ├ observation.images.cam_high: {'max': tensor with same number of dimensions (e.g. `(c, 1, 1)` for images, `(c,)` for states), etc.}
│ ...
├ info: a dictionary of metadata on the dataset
│ ├ codebase_version (str): keeps track of the codebase version the dataset was created with
│ ├ fps (float): frames per second the dataset is recorded/synchronized to
│ └ video (bool): indicates if frames are encoded in mp4 video files to save space or stored as png files
│ ├ video (bool): indicates if frames are encoded in mp4 video files to save space or stored as png files
│ └ encoding (dict): if video, this documents the main options that were used with ffmpeg to encode the videos
├ videos_dir (Path): where the mp4 videos or png images are stored/accessed
└ camera_keys (list of string): the keys to access camera features in the item returned by the dataset (e.g. `["observation.images.cam_high", ...]`)
```
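For concreteness, here is a minimal sketch of what the `info` metadata of a video dataset looks like with the new `encoding` entry, assuming the AV1 defaults of `encode_video_frames` shown further down (the `fps` value is illustrative):

```python
# Sketch of the `info` dict for a video dataset (values assume the new defaults).
info = {
    "codebase_version": "v1.6",
    "fps": 50,  # illustrative; depends on the dataset
    "video": True,
    "encoding": {  # populated via get_default_encoding() when video=True
        "vcodec": "libsvtav1",  # SVT-AV1 encoder
        "pix_fmt": "yuv420p",   # 4:2:0 chroma subsampling
        "g": 2,                 # group-of-pictures size (keyframe interval)
        "crf": 30,              # constant rate factor (quality/size trade-off)
    },
}
```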

View File

@ -257,10 +257,10 @@ def benchmark_encoding_decoding(
imgs_dir=imgs_dir,
video_path=video_path,
fps=fps,
video_codec=encoding_cfg["vcodec"],
pixel_format=encoding_cfg["pix_fmt"],
group_of_pictures_size=encoding_cfg.get("g"),
constant_rate_factor=encoding_cfg.get("crf"),
vcodec=encoding_cfg["vcodec"],
pix_fmt=encoding_cfg["pix_fmt"],
g=encoding_cfg.get("g"),
crf=encoding_cfg.get("crf"),
# fast_decode=encoding_cfg.get("fastdecode"),
overwrite=True,
)

View File

@ -125,6 +125,10 @@ available_real_world_datasets = [
"lerobot/aloha_static_vinh_cup_left",
"lerobot/aloha_static_ziploc_slide",
"lerobot/umi_cup_in_the_wild",
"lerobot/unitreeh1_fold_clothes",
"lerobot/unitreeh1_rearrange_objects",
"lerobot/unitreeh1_two_robot_greeting",
"lerobot/unitreeh1_warehouse",
]
available_datasets = list(

View File

@ -35,9 +35,8 @@ from lerobot.common.datasets.utils import (
)
from lerobot.common.datasets.video_utils import VideoFrame, load_from_videos
# For maintainers, see lerobot/common/datasets/push_dataset_to_hub/codebase_version.md
CODEBASE_VERSION = "v1.5"
# For maintainers, see lerobot/common/datasets/push_dataset_to_hub/CODEBASE_VERSION.md
CODEBASE_VERSION = "v1.6"
DATA_DIR = Path(os.environ["DATA_DIR"]) if "DATA_DIR" in os.environ else None

View File

@ -10,7 +10,8 @@ For instance, [`lerobot/pusht`](https://huggingface.co/datasets/lerobot/pusht) h
- [v1.2](https://huggingface.co/datasets/lerobot/pusht/tree/v1.2)
- [v1.3](https://huggingface.co/datasets/lerobot/pusht/tree/v1.3)
- [v1.4](https://huggingface.co/datasets/lerobot/pusht/tree/v1.4)
- [v1.5](https://huggingface.co/datasets/lerobot/pusht/tree/v1.5) <-- last version
- [v1.5](https://huggingface.co/datasets/lerobot/pusht/tree/v1.5)
- [v1.6](https://huggingface.co/datasets/lerobot/pusht/tree/v1.6) <-- last version
- [main](https://huggingface.co/datasets/lerobot/pusht/tree/main) <-- points to the last version
Starting with v1.6, every dataset pushed to the hub or saved locally also has this version number in their
@ -45,13 +46,11 @@ for repo_id in available_datasets:
dataset_info = api.list_repo_refs(repo_id, repo_type="dataset")
branches = [b.name for b in dataset_info.branches]
if CODEBASE_VERSION in branches:
# First check if the newer version already exists.
print(f"Found existing branch for {repo_id}. Please contact a member of the core LeRobot team.")
print("Exiting early")
break
print(f"{repo_id} already @{CODEBASE_VERSION}, skipping.")
continue
else:
# Now create a branch named after the new version by branching out from "main"
# which is expected to be the preceding version
api.create_branch(repo_id, repo_type="dataset", branch=CODEBASE_VERSION, revision="main")
print(f"{repo_id} successfully updated")
print(f"{repo_id} successfully updated @{CODEBASE_VERSION}")
```
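Since every codebase version lives on its own branch, downloads can be pinned to a specific version. A minimal sketch with `huggingface_hub`, assuming the `v1.6` branch exists on the repo:

```python
from huggingface_hub import snapshot_download

# Fetch the dataset files as they exist on the v1.6 branch,
# rather than whatever `main` currently points to.
local_path = snapshot_download(
    "lerobot/pusht",
    repo_type="dataset",
    revision="v1.6",  # must match the targeted CODEBASE_VERSION
)
```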

View File

@ -19,8 +19,8 @@ This file contains download scripts for raw datasets.
Example of usage:
```
python lerobot/common/datasets/push_dataset_to_hub/_download_raw.py \
--raw-dir data/cadene/pusht_raw \
--repo-id cadene/pusht_raw
--raw-dir data/lerobot-raw/pusht_raw \
--repo-id lerobot-raw/pusht_raw
```
"""
@ -31,63 +31,65 @@ from pathlib import Path
from huggingface_hub import snapshot_download
AVAILABLE_RAW_REPO_IDS = [
"lerobot-raw/aloha_mobile_cabinet_raw",
"lerobot-raw/aloha_mobile_chair_raw",
"lerobot-raw/aloha_mobile_elevator_raw",
"lerobot-raw/aloha_mobile_shrimp_raw",
"lerobot-raw/aloha_mobile_wash_pan_raw",
"lerobot-raw/aloha_mobile_wipe_wine_raw",
"lerobot-raw/aloha_sim_insertion_human_raw",
"lerobot-raw/aloha_sim_insertion_scripted_raw",
"lerobot-raw/aloha_sim_transfer_cube_human_raw",
"lerobot-raw/aloha_sim_transfer_cube_scripted_raw",
"lerobot-raw/aloha_static_battery_raw",
"lerobot-raw/aloha_static_candy_raw",
"lerobot-raw/aloha_static_coffee_new_raw",
"lerobot-raw/aloha_static_coffee_raw",
"lerobot-raw/aloha_static_cups_open_raw",
"lerobot-raw/aloha_static_fork_pick_up_raw",
"lerobot-raw/aloha_static_pingpong_test_raw",
"lerobot-raw/aloha_static_pro_pencil_raw",
"lerobot-raw/aloha_static_screw_driver_raw",
"lerobot-raw/aloha_static_tape_raw",
"lerobot-raw/aloha_static_thread_velcro_raw",
"lerobot-raw/aloha_static_towel_raw",
"lerobot-raw/aloha_static_vinh_cup_left_raw",
"lerobot-raw/aloha_static_vinh_cup_raw",
"lerobot-raw/aloha_static_ziploc_slide_raw",
"lerobot-raw/pusht_raw",
"lerobot-raw/umi_cup_in_the_wild_raw",
"lerobot-raw/unitreeh1_fold_clothes_raw",
"lerobot-raw/unitreeh1_rearrange_objects_raw",
"lerobot-raw/unitreeh1_two_robot_greeting_raw",
"lerobot-raw/unitreeh1_warehouse_raw",
"lerobot-raw/xarm_lift_medium_raw",
"lerobot-raw/xarm_lift_medium_replay_raw",
"lerobot-raw/xarm_push_medium_raw",
"lerobot-raw/xarm_push_medium_replay_raw",
]
from lerobot.common.datasets.push_dataset_to_hub.utils import check_repo_id
# {raw_repo_id: raw_format}
AVAILABLE_RAW_REPO_IDS = {
"lerobot-raw/aloha_mobile_cabinet_raw": "aloha_hdf5",
"lerobot-raw/aloha_mobile_chair_raw": "aloha_hdf5",
"lerobot-raw/aloha_mobile_elevator_raw": "aloha_hdf5",
"lerobot-raw/aloha_mobile_shrimp_raw": "aloha_hdf5",
"lerobot-raw/aloha_mobile_wash_pan_raw": "aloha_hdf5",
"lerobot-raw/aloha_mobile_wipe_wine_raw": "aloha_hdf5",
"lerobot-raw/aloha_sim_insertion_human_raw": "aloha_hdf5",
"lerobot-raw/aloha_sim_insertion_scripted_raw": "aloha_hdf5",
"lerobot-raw/aloha_sim_transfer_cube_human_raw": "aloha_hdf5",
"lerobot-raw/aloha_sim_transfer_cube_scripted_raw": "aloha_hdf5",
"lerobot-raw/aloha_static_battery_raw": "aloha_hdf5",
"lerobot-raw/aloha_static_candy_raw": "aloha_hdf5",
"lerobot-raw/aloha_static_coffee_new_raw": "aloha_hdf5",
"lerobot-raw/aloha_static_coffee_raw": "aloha_hdf5",
"lerobot-raw/aloha_static_cups_open_raw": "aloha_hdf5",
"lerobot-raw/aloha_static_fork_pick_up_raw": "aloha_hdf5",
"lerobot-raw/aloha_static_pingpong_test_raw": "aloha_hdf5",
"lerobot-raw/aloha_static_pro_pencil_raw": "aloha_hdf5",
"lerobot-raw/aloha_static_screw_driver_raw": "aloha_hdf5",
"lerobot-raw/aloha_static_tape_raw": "aloha_hdf5",
"lerobot-raw/aloha_static_thread_velcro_raw": "aloha_hdf5",
"lerobot-raw/aloha_static_towel_raw": "aloha_hdf5",
"lerobot-raw/aloha_static_vinh_cup_left_raw": "aloha_hdf5",
"lerobot-raw/aloha_static_vinh_cup_raw": "aloha_hdf5",
"lerobot-raw/aloha_static_ziploc_slide_raw": "aloha_hdf5",
"lerobot-raw/pusht_raw": "pusht_zarr",
"lerobot-raw/umi_cup_in_the_wild_raw": "umi_zarr",
"lerobot-raw/unitreeh1_fold_clothes_raw": "aloha_hdf5",
"lerobot-raw/unitreeh1_rearrange_objects_raw": "aloha_hdf5",
"lerobot-raw/unitreeh1_two_robot_greeting_raw": "aloha_hdf5",
"lerobot-raw/unitreeh1_warehouse_raw": "aloha_hdf5",
"lerobot-raw/xarm_lift_medium_raw": "xarm_pkl",
"lerobot-raw/xarm_lift_medium_replay_raw": "xarm_pkl",
"lerobot-raw/xarm_push_medium_raw": "xarm_pkl",
"lerobot-raw/xarm_push_medium_replay_raw": "xarm_pkl",
}
def download_raw(raw_dir: Path, repo_id: str):
# Check repo_id is well formatted
if len(repo_id.split("/")) != 2:
raise ValueError(
f"`repo_id` is expected to contain a community or user id `/` the name of the dataset (e.g. 'lerobot/pusht'), but contains '{repo_id}'."
)
check_repo_id(repo_id)
user_id, dataset_id = repo_id.split("/")
if not dataset_id.endswith("_raw"):
warnings.warn(
f"`dataset_id` ({dataset_id}) doesn't end with '_raw' (e.g. 'lerobot/pusht_raw'). Following this naming convention by renaming your repository is advised, but not mandatory.",
f"""`dataset_id` ({dataset_id}) doesn't end with '_raw' (e.g. 'lerobot/pusht_raw'). Following this
naming convention by renaming your repository is advised, but not mandatory.""",
stacklevel=1,
)
# Send warning if raw_dir isn't well formatted
if raw_dir.parts[-2] != user_id or raw_dir.parts[-1] != dataset_id:
warnings.warn(
f"`raw_dir` ({raw_dir}) doesn't contain a community or user id `/` the name of the dataset that matches the `repo_id` (e.g. 'data/lerobot/pusht_raw'). Following this naming convention is advised, but not mandatory.",
f"""`raw_dir` ({raw_dir}) doesn't contain a community or user id `/` the name of the dataset that
matches the `repo_id` (e.g. 'data/lerobot/pusht_raw'). Following this naming convention is advised,
but not mandatory.""",
stacklevel=1,
)
raw_dir.mkdir(parents=True, exist_ok=True)
@ -97,8 +99,9 @@ def download_raw(raw_dir: Path, repo_id: str):
logging.info(f"Finished downloading from huggingface.co/{user_id} for {dataset_id}")
def download_all_raw_datasets():
data_dir = Path("data")
def download_all_raw_datasets(data_dir: Path | None = None):
if data_dir is None:
data_dir = Path("data")
for repo_id in AVAILABLE_RAW_REPO_IDS:
raw_dir = data_dir / repo_id
download_raw(raw_dir, repo_id)
@ -106,7 +109,8 @@ def download_all_raw_datasets():
def main():
parser = argparse.ArgumentParser(
description=f"A script to download raw datasets from the Hugging Face Hub to a local directory. Here is a non-exhaustive list of available repositories to use in `--repo-id`: {AVAILABLE_RAW_REPO_IDS}",
description=f"""A script to download raw datasets from the Hugging Face Hub to a local directory. Here is a
non-exhaustive list of available repositories to use in `--repo-id`: {AVAILABLE_RAW_REPO_IDS}""",
)
parser.add_argument(
@ -119,7 +123,8 @@ def main():
"--repo-id",
type=str,
required=True,
help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset (e.g. `lerobot/pusht_raw`, `cadene/aloha_sim_insertion_human_raw`).",
help="""Repository identifier on Hugging Face: a community or a user name `/` the name of
the dataset (e.g. `lerobot/pusht_raw`, `cadene/aloha_sim_insertion_human_raw`).""",
)
args = parser.parse_args()
download_raw(**vars(args))

View File

@ -0,0 +1,184 @@
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Use this script to batch encode lerobot datasets from their raw format to LeRobotDataset and push their updated
version to the hub. Under the hood, this script reuses 'push_dataset_to_hub.py'. It assumes that you already
downloaded raw datasets, which you can do with the related '_download_raw.py' script.
For instance, for codebase_version = 'v1.6', the following command was run, assuming raw datasets from
lerobot-raw were downloaded in 'raw/datasets/directory':
```bash
python lerobot/common/datasets/push_dataset_to_hub/_encode_datasets.py \
--raw-dir raw/datasets/directory \
--raw-repo-ids lerobot-raw \
--local-dir push/datasets/directory \
--tests-data-dir tests/data \
--push-repo lerobot \
--vcodec libsvtav1 \
--pix-fmt yuv420p \
--g 2 \
--crf 30
```
"""
import argparse
from pathlib import Path
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub._download_raw import AVAILABLE_RAW_REPO_IDS
from lerobot.common.datasets.push_dataset_to_hub.utils import check_repo_id
from lerobot.scripts.push_dataset_to_hub import push_dataset_to_hub
def get_push_repo_id_from_raw(raw_repo_id: str, push_repo: str) -> str:
dataset_id_raw = raw_repo_id.split("/")[1]
dataset_id = dataset_id_raw.removesuffix("_raw")
return f"{push_repo}/{dataset_id}"
def encode_datasets(
raw_dir: Path,
raw_repo_ids: list[str],
push_repo: str,
vcodec: str,
pix_fmt: str,
g: int,
crf: int,
local_dir: Path | None = None,
tests_data_dir: Path | None = None,
raw_format: str | None = None,
dry_run: bool = False,
) -> None:
if len(raw_repo_ids) == 1 and raw_repo_ids[0].lower() == "lerobot-raw":
raw_repo_ids_format = AVAILABLE_RAW_REPO_IDS
else:
if raw_format is None:
raise ValueError("`raw_format` must be specified when `--raw-repo-ids` is not 'lerobot-raw'.")
raw_repo_ids_format = {id_: raw_format for id_ in raw_repo_ids}
for raw_repo_id, repo_raw_format in raw_repo_ids_format.items():
check_repo_id(raw_repo_id)
dataset_repo_id_push = get_push_repo_id_from_raw(raw_repo_id, push_repo)
dataset_raw_dir = raw_dir / raw_repo_id
dataset_dir = local_dir / dataset_repo_id_push if local_dir is not None else None
encoding = {
"vcodec": vcodec,
"pix_fmt": pix_fmt,
"g": g,
"crf": crf,
}
if not dataset_raw_dir.is_dir():
raise NotADirectoryError(dataset_raw_dir)
if not dry_run:
push_dataset_to_hub(
dataset_raw_dir,
raw_format=repo_raw_format,
repo_id=dataset_repo_id_push,
local_dir=dataset_dir,
resume=True,
encoding=encoding,
tests_data_dir=tests_data_dir,
)
else:
print(
f"DRY RUN: {dataset_raw_dir} --> {dataset_dir} --> {dataset_repo_id_push}@{CODEBASE_VERSION}"
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--raw-dir",
type=Path,
default=Path("data"),
help="Directory where raw datasets are located.",
)
parser.add_argument(
"--raw-repo-ids",
type=str,
nargs="*",
default=["lerobot-raw"],
help="""Raw dataset repo ids. If 'lerobot-raw', the keys from `AVAILABLE_RAW_REPO_IDS` will be
used and raw datasets will be fetched from the 'lerobot-raw/' repo and pushed with their
associated format. It is assumed that each dataset is located at `raw_dir / raw_repo_id`.""",
)
parser.add_argument(
"--raw-format",
type=str,
default=None,
help="""Raw format to use for the raw repo-ids. Must be specified if --raw-repo-ids is not
'lerobot-raw'""",
)
parser.add_argument(
"--local-dir",
type=Path,
default=None,
help="""When provided, writes the dataset converted to LeRobotDataset format in this directory
(e.g. `data/lerobot/aloha_mobile_chair`).""",
)
parser.add_argument(
"--push-repo",
type=str,
default="lerobot",
help="Repo to upload datasets to",
)
parser.add_argument(
"--vcodec",
type=str,
default="libsvtav1",
help="Codec to use for encoding videos",
)
parser.add_argument(
"--pix-fmt",
type=str,
default="yuv420p",
help="Pixel format (chroma subsampling) to be used for encoding.",
)
parser.add_argument(
"--g",
type=int,
default=2,
help="Group of pictures size to be used for encoding.",
)
parser.add_argument(
"--crf",
type=int,
default=30,
help="Constant rate factor to be used for encoding.",
)
parser.add_argument(
"--tests-data-dir",
type=Path,
default=None,
help=(
"When provided, save tests artifacts into the given directory "
"(e.g. `--tests-data-dir tests/data` will save to tests/data/{--repo-id})."
),
)
parser.add_argument(
"--dry-run",
type=int,
default=0,
help="If not set to 0, this script won't download or upload anything.",
)
args = parser.parse_args()
encode_datasets(**vars(args))
if __name__ == "__main__":
main()
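Besides the CLI shown in the docstring, the conversion can be driven from Python. A sketch of a dry run over a single custom raw repo (the repo id and format are illustrative, and the raw data must already exist under `raw_dir / raw_repo_id`, since the directory check runs before the dry-run branch):

```python
from pathlib import Path

from lerobot.common.datasets.push_dataset_to_hub._encode_datasets import encode_datasets

# Dry run: prints the planned raw dir -> local dir -> push repo mapping
# without encoding or uploading anything.
encode_datasets(
    raw_dir=Path("data"),
    raw_repo_ids=["my-org/my_dataset_raw"],  # hypothetical repo id
    raw_format="aloha_hdf5",  # required when raw_repo_ids is not 'lerobot-raw'
    push_repo="my-org",
    vcodec="libsvtav1",
    pix_fmt="yuv420p",
    g=2,
    crf=30,
    dry_run=True,
)
```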

View File

@ -29,7 +29,11 @@ from datasets import Dataset, Features, Image, Sequence, Value
from PIL import Image as PILImage
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes, save_images_concurrently
from lerobot.common.datasets.push_dataset_to_hub.utils import (
concatenate_episodes,
get_default_encoding,
save_images_concurrently,
)
from lerobot.common.datasets.utils import (
calculate_episode_data_index,
hf_transform_to_torch,
@ -72,7 +76,14 @@ def check_format(raw_dir) -> bool:
assert c < h and c < w, f"Expect (h,w,c) image format but ({h=},{w=},{c=}) provided."
def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episodes: list[int] | None = None):
def load_from_raw(
raw_dir: Path,
videos_dir: Path,
fps: int,
video: bool,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
# only frames from simulation are uncompressed
compressed_images = "sim" not in raw_dir.name
@ -123,7 +134,7 @@ def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episod
# encode images to a mp4 video
fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
video_path = videos_dir / fname
encode_video_frames(tmp_imgs_dir, video_path, fps)
encode_video_frames(tmp_imgs_dir, video_path, fps, **(encoding or {}))
# clean temporary images directory
shutil.rmtree(tmp_imgs_dir)
@ -200,6 +211,7 @@ def from_raw_to_lerobot_format(
fps: int | None = None,
video: bool = True,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
# sanity check
check_format(raw_dir)
@ -207,7 +219,7 @@ def from_raw_to_lerobot_format(
if fps is None:
fps = 50
data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes)
data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding)
hf_dataset = to_hf_dataset(data_dict, video)
episode_data_index = calculate_episode_data_index(hf_dataset)
info = {
@ -215,4 +227,7 @@ def from_raw_to_lerobot_format(
"fps": fps,
"video": video,
}
if video:
info["encoding"] = get_default_encoding()
return hf_dataset, episode_data_index, info

View File

@ -81,8 +81,9 @@ def from_raw_to_lerobot_format(
fps: int | None = None,
video: bool = True,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
if video or episodes is not None:
if video or episodes or encoding is not None:
# TODO(aliberts): support this
raise NotImplementedError

View File

@ -18,6 +18,7 @@ Contains utilities to process raw data format from dora-record
"""
import re
import warnings
from pathlib import Path
import pandas as pd
@ -199,6 +200,7 @@ def from_raw_to_lerobot_format(
fps: int | None = None,
video: bool = True,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
# sanity check
check_format(raw_dir)
@ -211,6 +213,12 @@ def from_raw_to_lerobot_format(
if not video:
raise NotImplementedError()
if encoding is not None:
warnings.warn(
"Video encoding is currently done outside of LeRobot for the dora_parquet format.",
stacklevel=1,
)
data_df = load_from_raw(raw_dir, videos_dir, fps, episodes)
hf_dataset = to_hf_dataset(data_df, video)
episode_data_index = calculate_episode_data_index(hf_dataset)
@ -219,4 +227,7 @@ def from_raw_to_lerobot_format(
"fps": fps,
"video": video,
}
if video:
info["encoding"] = "unknown"
return hf_dataset, episode_data_index, info

View File

@ -26,7 +26,11 @@ from datasets import Dataset, Features, Image, Sequence, Value
from PIL import Image as PILImage
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes, save_images_concurrently
from lerobot.common.datasets.push_dataset_to_hub.utils import (
concatenate_episodes,
get_default_encoding,
save_images_concurrently,
)
from lerobot.common.datasets.utils import (
calculate_episode_data_index,
hf_transform_to_torch,
@ -62,6 +66,7 @@ def load_from_raw(
video: bool,
episodes: list[int] | None = None,
keypoints_instead_of_image: bool = False,
encoding: dict | None = None,
):
try:
import pymunk
@ -172,7 +177,7 @@ def load_from_raw(
# encode images to a mp4 video
fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
video_path = videos_dir / fname
encode_video_frames(tmp_imgs_dir, video_path, fps)
encode_video_frames(tmp_imgs_dir, video_path, fps, **(encoding or {}))
# clean temporary images directory
shutil.rmtree(tmp_imgs_dir)
@ -244,6 +249,7 @@ def from_raw_to_lerobot_format(
fps: int | None = None,
video: bool = True,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
# Manually change this to True to use keypoints of the T instead of an image observation (but don't merge
# with True). Also make sure to use video = 0 in the `push_dataset_to_hub.py` script.
@ -255,7 +261,7 @@ def from_raw_to_lerobot_format(
if fps is None:
fps = 10
data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, keypoints_instead_of_image)
data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, keypoints_instead_of_image, encoding)
hf_dataset = to_hf_dataset(data_dict, video, keypoints_instead_of_image)
episode_data_index = calculate_episode_data_index(hf_dataset)
info = {
@ -263,4 +269,7 @@ def from_raw_to_lerobot_format(
"fps": fps,
"video": video if not keypoints_instead_of_image else 0,
}
if video:
info["encoding"] = get_default_encoding()
return hf_dataset, episode_data_index, info

View File

@ -27,7 +27,11 @@ from PIL import Image as PILImage
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub._umi_imagecodecs_numcodecs import register_codecs
from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes, save_images_concurrently
from lerobot.common.datasets.push_dataset_to_hub.utils import (
concatenate_episodes,
get_default_encoding,
save_images_concurrently,
)
from lerobot.common.datasets.utils import (
calculate_episode_data_index,
hf_transform_to_torch,
@ -60,7 +64,14 @@ def check_format(raw_dir) -> bool:
assert all(nb_frames == zarr_data[dataset].shape[0] for dataset in required_datasets)
def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episodes: list[int] | None = None):
def load_from_raw(
raw_dir: Path,
videos_dir: Path,
fps: int,
video: bool,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
zarr_path = raw_dir / "cup_in_the_wild.zarr"
zarr_data = zarr.open(zarr_path, mode="r")
@ -88,49 +99,61 @@ def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episod
to_ids.append(to_idx)
from_idx = to_idx
ep_dicts_dir = videos_dir / "ep_dicts"
ep_dicts_dir.mkdir(exist_ok=True, parents=True)
ep_dicts = []
ep_ids = episodes if episodes else range(num_episodes)
for ep_idx, selected_ep_idx in tqdm.tqdm(enumerate(ep_ids)):
from_idx = from_ids[selected_ep_idx]
to_idx = to_ids[selected_ep_idx]
num_frames = to_idx - from_idx
ep_dict_path = ep_dicts_dir / f"{ep_idx}"
if not ep_dict_path.is_file():
from_idx = from_ids[selected_ep_idx]
to_idx = to_ids[selected_ep_idx]
num_frames = to_idx - from_idx
# TODO(rcadene): save temporary images of the episode?
# TODO(rcadene): save temporary images of the episode?
state = states[from_idx:to_idx]
state = states[from_idx:to_idx]
ep_dict = {}
ep_dict = {}
# load 57MB of images in RAM (400x224x224x3 uint8)
imgs_array = zarr_data["data/camera0_rgb"][from_idx:to_idx]
img_key = "observation.image"
if video:
# save png images in temporary directory
tmp_imgs_dir = videos_dir / "tmp_images"
save_images_concurrently(imgs_array, tmp_imgs_dir)
# load 57MB of images in RAM (400x224x224x3 uint8)
imgs_array = zarr_data["data/camera0_rgb"][from_idx:to_idx]
img_key = "observation.image"
if video:
fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
video_path = videos_dir / fname
if not video_path.is_file():
# save png images in temporary directory
tmp_imgs_dir = videos_dir / "tmp_images"
save_images_concurrently(imgs_array, tmp_imgs_dir)
# encode images to a mp4 video
fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
video_path = videos_dir / fname
encode_video_frames(tmp_imgs_dir, video_path, fps)
# encode images to a mp4 video
encode_video_frames(tmp_imgs_dir, video_path, fps, **(encoding or {}))
# clean temporary images directory
shutil.rmtree(tmp_imgs_dir)
# clean temporary images directory
shutil.rmtree(tmp_imgs_dir)
# store the reference to the video frame
ep_dict[img_key] = [{"path": f"videos/{fname}", "timestamp": i / fps} for i in range(num_frames)]
# store the reference to the video frame
ep_dict[img_key] = [
{"path": f"videos/{fname}", "timestamp": i / fps} for i in range(num_frames)
]
else:
ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array]
ep_dict["observation.state"] = state
ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames, dtype=torch.int64)
ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
ep_dict["episode_data_index_from"] = torch.tensor([from_idx] * num_frames)
ep_dict["episode_data_index_to"] = torch.tensor([from_idx + num_frames] * num_frames)
ep_dict["end_pose"] = end_pose[from_idx:to_idx]
ep_dict["start_pos"] = start_pos[from_idx:to_idx]
ep_dict["gripper_width"] = gripper_width[from_idx:to_idx]
torch.save(ep_dict, ep_dict_path)
else:
ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array]
ep_dict = torch.load(ep_dict_path)
ep_dict["observation.state"] = state
ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames, dtype=torch.int64)
ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
ep_dict["episode_data_index_from"] = torch.tensor([from_idx] * num_frames)
ep_dict["episode_data_index_to"] = torch.tensor([from_idx + num_frames] * num_frames)
ep_dict["end_pose"] = end_pose[from_idx:to_idx]
ep_dict["start_pos"] = start_pos[from_idx:to_idx]
ep_dict["gripper_width"] = gripper_width[from_idx:to_idx]
ep_dicts.append(ep_dict)
data_dict = concatenate_episodes(ep_dicts)
@ -183,6 +206,7 @@ def from_raw_to_lerobot_format(
fps: int | None = None,
video: bool = True,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
# sanity check
check_format(raw_dir)
@ -196,7 +220,7 @@ def from_raw_to_lerobot_format(
"Generating UMI dataset without `video=True` creates ~150GB on disk and requires ~80GB in RAM."
)
data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes)
data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding)
hf_dataset = to_hf_dataset(data_dict, video)
episode_data_index = calculate_episode_data_index(hf_dataset)
info = {
@ -204,4 +228,7 @@ def from_raw_to_lerobot_format(
"fps": fps,
"video": video,
}
if video:
info["encoding"] = get_default_encoding()
return hf_dataset, episode_data_index, info

View File

@ -13,6 +13,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
@ -20,6 +21,8 @@ import numpy
import PIL
import torch
from lerobot.common.datasets.video_utils import encode_video_frames
def concatenate_episodes(ep_dicts):
data_dict = {}
@ -51,3 +54,21 @@ def save_images_concurrently(imgs_array: numpy.array, out_dir: Path, max_workers
num_images = len(imgs_array)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
[executor.submit(save_image, imgs_array[i], i, out_dir) for i in range(num_images)]
def get_default_encoding() -> dict:
"""Returns the default ffmpeg encoding parameters used by `encode_video_frames`."""
signature = inspect.signature(encode_video_frames)
return {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty and k in ["vcodec", "pix_fmt", "g", "crf"]
}
def check_repo_id(repo_id: str) -> None:
if len(repo_id.split("/")) != 2:
raise ValueError(
f"""`repo_id` is expected to contain a community or user id `/` the name of the dataset
(e.g. 'lerobot/pusht'), but contains '{repo_id}'."""
)
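As a sanity check, with the `encode_video_frames` defaults shown in `video_utils.py` below, these helpers behave as in this sketch:

```python
from lerobot.common.datasets.push_dataset_to_hub.utils import check_repo_id, get_default_encoding

# Introspects encode_video_frames' signature; with the new AV1 defaults this yields:
assert get_default_encoding() == {"vcodec": "libsvtav1", "pix_fmt": "yuv420p", "g": 2, "crf": 30}

check_repo_id("lerobot/pusht")  # ok: 'user_or_org/dataset_name'
check_repo_id("pusht")          # raises ValueError: missing the '/' separator
```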

View File

@ -26,7 +26,11 @@ from datasets import Dataset, Features, Image, Sequence, Value
from PIL import Image as PILImage
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes, save_images_concurrently
from lerobot.common.datasets.push_dataset_to_hub.utils import (
concatenate_episodes,
get_default_encoding,
save_images_concurrently,
)
from lerobot.common.datasets.utils import (
calculate_episode_data_index,
hf_transform_to_torch,
@ -56,7 +60,14 @@ def check_format(raw_dir):
assert all(len(nested_dict[subkey]) == expected_len for subkey in subkeys if subkey in nested_dict)
def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episodes: list[int] | None = None):
def load_from_raw(
raw_dir: Path,
videos_dir: Path,
fps: int,
video: bool,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
pkl_path = raw_dir / "buffer.pkl"
with open(pkl_path, "rb") as f:
@ -105,7 +116,7 @@ def load_from_raw(raw_dir: Path, videos_dir: Path, fps: int, video: bool, episod
# encode images to a mp4 video
fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
video_path = videos_dir / fname
encode_video_frames(tmp_imgs_dir, video_path, fps)
encode_video_frames(tmp_imgs_dir, video_path, fps, **(encoding or {}))
# clean temporary images directory
shutil.rmtree(tmp_imgs_dir)
@ -167,6 +178,7 @@ def from_raw_to_lerobot_format(
fps: int | None = None,
video: bool = True,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
# sanity check
check_format(raw_dir)
@ -174,7 +186,7 @@ def from_raw_to_lerobot_format(
if fps is None:
fps = 15
data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes)
data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding)
hf_dataset = to_hf_dataset(data_dict, video)
episode_data_index = calculate_episode_data_index(hf_dataset)
info = {
@ -182,4 +194,7 @@ def from_raw_to_lerobot_format(
"fps": fps,
"video": video,
}
if video:
info["encoding"] = get_default_encoding()
return hf_dataset, episode_data_index, info

View File

@ -166,10 +166,10 @@ def encode_video_frames(
imgs_dir: Path,
video_path: Path,
fps: int,
video_codec: str = "libsvtav1",
pixel_format: str = "yuv420p",
group_of_pictures_size: int | None = 2,
constant_rate_factor: int | None = 30,
vcodec: str = "libsvtav1",
pix_fmt: str = "yuv420p",
g: int | None = 2,
crf: int | None = 30,
fast_decode: int = 0,
log_level: str | None = "error",
overwrite: bool = False,
@ -183,20 +183,20 @@ def encode_video_frames(
("-f", "image2"),
("-r", str(fps)),
("-i", str(imgs_dir / "frame_%06d.png")),
("-vcodec", video_codec),
("-pix_fmt", pixel_format),
("-vcodec", vcodec),
("-pix_fmt", pix_fmt),
]
)
if group_of_pictures_size is not None:
ffmpeg_args["-g"] = str(group_of_pictures_size)
if g is not None:
ffmpeg_args["-g"] = str(g)
if constant_rate_factor is not None:
ffmpeg_args["-crf"] = str(constant_rate_factor)
if crf is not None:
ffmpeg_args["-crf"] = str(crf)
if fast_decode:
key = "-svtav1-params" if video_codec == "libsvtav1" else "-tune"
value = f"fast-decode={fast_decode}" if video_codec == "libsvtav1" else "fastdecode"
key = "-svtav1-params" if vcodec == "libsvtav1" else "-tune"
value = f"fast-decode={fast_decode}" if vcodec == "libsvtav1" else "fastdecode"
ffmpeg_args[key] = value
if log_level is not None:

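With the parameters now named after the raw ffmpeg flags, the defaults translate into a command along these lines (a sketch: it assumes `log_level` maps to ffmpeg's `-loglevel` flag and the output path comes last, and it uses the `frame_%06d.png` pattern hardcoded above):

```python
import subprocess
from pathlib import Path

imgs_dir, video_path, fps = Path("tmp_images"), Path("episode_000000.mp4"), 50

# Rough equivalent of encode_video_frames(imgs_dir, video_path, fps) with defaults:
subprocess.run(
    ["ffmpeg", "-f", "image2", "-r", str(fps), "-i", str(imgs_dir / "frame_%06d.png"),
     "-vcodec", "libsvtav1", "-pix_fmt", "yuv420p", "-g", "2", "-crf", "30",
     "-loglevel", "error", str(video_path)],
    check=True,
)
```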
View File

@ -101,7 +101,7 @@ from termcolor import colored
from lerobot.common.datasets.compute_stats import compute_stats
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset
from lerobot.common.datasets.push_dataset_to_hub.aloha_hdf5_format import to_hf_dataset
from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes
from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes, get_default_encoding
from lerobot.common.datasets.utils import calculate_episode_data_index
from lerobot.common.datasets.video_utils import encode_video_frames
from lerobot.common.policies.factory import make_policy
@ -479,6 +479,8 @@ def record_dataset(
"fps": fps,
"video": video,
}
if video:
info["encoding"] = get_default_encoding()
lerobot_dataset = LeRobotDataset.from_preloaded(
repo_id=repo_id,

View File

@ -55,6 +55,7 @@ from safetensors.torch import save_file
from lerobot.common.datasets.compute_stats import compute_stats
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset
from lerobot.common.datasets.push_dataset_to_hub.utils import check_repo_id
from lerobot.common.datasets.utils import flatten_dict
@ -140,14 +141,12 @@ def push_dataset_to_hub(
num_workers: int = 8,
episodes: list[int] | None = None,
force_override: bool = False,
resume: bool = False,
cache_dir: Path = Path("/tmp"),
tests_data_dir: Path | None = None,
encoding: dict | None = None,
):
# Check repo_id is well formatted
if len(repo_id.split("/")) != 2:
raise ValueError(
f"`repo_id` is expected to contain a community or user id `/` the name of the dataset (e.g. 'lerobot/pusht'), but instead contains '{repo_id}'."
)
check_repo_id(repo_id)
user_id, dataset_id = repo_id.split("/")
# Robustify when `raw_dir` is str instead of Path
@ -173,7 +172,7 @@ def push_dataset_to_hub(
if local_dir.exists():
if force_override:
shutil.rmtree(local_dir)
else:
elif not resume:
raise ValueError(f"`local_dir` already exists ({local_dir}). Use `--force-override 1` or `--resume 1`.")
meta_data_dir = local_dir / "meta_data"
@ -191,7 +190,7 @@ def push_dataset_to_hub(
# convert dataset from original raw format to LeRobot format
from_raw_to_lerobot_format = get_from_raw_to_lerobot_format_fn(raw_format)
hf_dataset, episode_data_index, info = from_raw_to_lerobot_format(
raw_dir, videos_dir, fps, video, episodes
raw_dir, videos_dir, fps, video, episodes, encoding
)
lerobot_dataset = LeRobotDataset.from_preloaded(
@ -315,6 +314,12 @@ def main():
default=0,
help="When set to 1, removes the provided output directory if it already exists. By default, raises a ValueError exception.",
)
parser.add_argument(
"--resume",
type=int,
default=0,
help="When set to 1, resumes a previous run.",
)
parser.add_argument(
"--tests-data-dir",
type=Path,

poetry.lock (generated)
View File

@ -3212,23 +3212,6 @@ pytest = ">=4.6"
[package.extras]
testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"]
[[package]]
name = "pytest-mock"
version = "3.14.0"
description = "Thin-wrapper around the mock package for easier use with pytest"
optional = true
python-versions = ">=3.8"
files = [
{file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"},
{file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"},
]
[package.dependencies]
pytest = ">=6.2.5"
[package.extras]
dev = ["pre-commit", "pytest-asyncio", "tox"]
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@ -4494,7 +4477,7 @@ dev = ["debugpy", "pre-commit"]
dora = ["gym-dora"]
koch = ["dynamixel-sdk", "pynput"]
pusht = ["gym-pusht"]
test = ["pytest", "pytest-cov", "pytest-mock"]
test = ["pytest", "pytest-cov"]
umi = ["imagecodecs"]
video-benchmark = ["pandas", "scikit-image"]
xarm = ["gym-xarm"]
@ -4502,4 +4485,4 @@ xarm = ["gym-xarm"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<3.13"
content-hash = "882b44dada0890dd4e1c727d3363d95cbe1a4adf1d80aa5263080597d80be42c"
content-hash = "dfe9c6a54e0382156e62e7bd2c7aab1be6372da76d30c61b06d27232276638cb"

View File

@ -62,7 +62,6 @@ rerun-sdk = ">=0.15.1"
deepdiff = ">=7.0.1"
scikit-image = {version = ">=0.23.2", optional = true}
pandas = {version = ">=2.2.2", optional = true}
pytest-mock = {version = ">=3.14.0", optional = true}
dynamixel-sdk = {version = ">=3.7.31", optional = true}
pynput = {version = ">=1.7.7", optional = true}
@ -74,7 +73,7 @@ pusht = ["gym-pusht"]
xarm = ["gym-xarm"]
aloha = ["gym-aloha"]
dev = ["pre-commit", "debugpy"]
test = ["pytest", "pytest-cov", "pytest-mock"]
test = ["pytest", "pytest-cov"]
umi = ["imagecodecs"]
video_benchmark = ["scikit-image", "pandas"]
koch = ["dynamixel-sdk", "pynput"]
@ -110,7 +109,6 @@ exclude = [
[tool.ruff.lint]
select = ["E4", "E7", "E9", "F", "I", "N", "B", "C4", "SIM"]
ignore-init-module-imports = true
[build-system]

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9f9347c8d9ac90ee44e6dd86f65043438168df6bbe4bab2d2b875e55ef7376ef
size 1488
oid sha256:7841afb9ef99c0601448c43a20c25eb029440c73816319c67c5d7e1c5cde2445
size 136

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf148247bf191c7f7e8af738a7b9e147f9ffffeec0e4b9d1c4783c4e384da7eb
size 33
oid sha256:50e40e4c2bb523fca0b54e9a9635281312e9c6f9d757db03c06a0865c5508f29
size 188

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:02fc4ea25766269f65752a60b0594c43d799b0ae528cd773bf024b064b5aa329
oid sha256:03508d82db846a804aef1a28aec3cb9572e3105b55a02b6ddbb09b2522d57b84
size 4344

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:55d7b1a06fe3e3051482752740074348bdb5fc98fb2e305b06d6203994117b27
oid sha256:7009b3d2f14d6af497eeb32a52332e79cb9c07db24a6c2bbfbeffbaa8151dd69
size 592448

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:98329e4b40e9be0d63f7d36da9d86c44bbe7eeeb1b10d3ba973c923f3be70867
oid sha256:34ece24fb6b302db0b68987858509f31713fb299faa9a9d34b8fd68f10bc3100
size 247

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:54e42cdfd016a0ced2ab1fe2966a8c15a2384e0dbe1a2fe87433a2d1b8209ac0
size 5220057
oid sha256:a70cc17019407cf6bee44fa2c78b4f29e48eb1696aa1a4ff4c048ba256574523
size 6356921

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:af1ded2a244cb47a96255b75f584a643edf6967e13bb5464b330ffdd9d7ad859
size 5284692
oid sha256:2b35992036e6dcee7d4df6d1675d55d1dd2d658b2d65442737e709895699a2f0
size 5084448

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:13d1bebabd79984fd6715971be758ef9a354495adea5e8d33f4e7904365e112b
size 5258380
oid sha256:3aa92e6b6bd0e39f6de530ea6a270671db7350cdc101c9d9030c775539c708c1
size 5441406

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f33bc6810f0b91817a42610364cb49ed1b99660f058f0f9407e6f5920d0aee02
size 1008
oid sha256:4ee862b1a6dc1d11df77c36c47ea00db88ad35a48e4d71c2940ad26b55fe2167
size 136

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf148247bf191c7f7e8af738a7b9e147f9ffffeec0e4b9d1c4783c4e384da7eb
size 33
oid sha256:50e40e4c2bb523fca0b54e9a9635281312e9c6f9d757db03c06a0865c5508f29
size 188

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7b58d6c89e936a781a307805ebecf0dd473fbc02d52a7094da62e54bffb9454a
oid sha256:095c30bfe3c5da168c85aceef905e74e2142866332282965aa6812f6e6e48448
size 4344

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a08be578285cbe2d35b78f150d464ff3e10604a9865398c976983e0d711774f9
oid sha256:98859f2d87e1a0abb9a930a82af623504b3efb26f70fe576f05bab7f19024427
size 788528

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:34e36233477c8aa0b0840314ddace072062d4f486d06546bbd6550832c370065
oid sha256:38cf4116a65cb92a5c43f9b9da7a7b81cfa9168b17605c8c456f7d3a3a23b77a
size 247

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:66e7349a4a82ca6042a7189608d01eb1cfa38d100d039b5445ae1a9e65d824ab
size 14470946
oid sha256:596dda720d378a44b6b61a6a72b44bec3e55e85198bca37f9dace6fe84af7ff0
size 16062396

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a2146f0c10c9f2611e57e617983aa4f91ad681b4fc50d91b992b97abd684f926
size 11662185
oid sha256:c614bbaf93d65354a82001b357682a0bd36f9603685f6c735c5e377b763d0bdb
size 10317415

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5affbaf1c48895ba3c626e0d8cf1309e5f4ec6bbaa135313096f52a22de66c05
size 11410342
oid sha256:868788028a38334b6b566cb17ffcc2ace2ec2b2b68ff2a58b6d29eb3c3e2ec1f
size 9516445

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6c2b195ca91b88fd16422128d386d2cabd808a1862c6d127e6bf2e83e1fe819a
size 448
oid sha256:f365a02b052a2697b1558f4ab9b813f0d4ba46a5bc6ae3da30bbc4b135426aa6
size 136

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf148247bf191c7f7e8af738a7b9e147f9ffffeec0e4b9d1c4783c4e384da7eb
size 33
oid sha256:50e40e4c2bb523fca0b54e9a9635281312e9c6f9d757db03c06a0865c5508f29
size 188

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b360b6b956d2adcb20589947c553348ef1eb6b70743c989dcbe95243d8592ce5
oid sha256:5c96f47b569b7af82e05200213d733626664150aa7c5ae3298fd04a2138a2023
size 4344

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3f5c3926b4d4da9271abefcdf6a8952bb1f13258a9c39fe0fd223f548dc89dcb
oid sha256:75f53d221827f17cc2ded3908452e24331b39b79dc3a26f2b9d89a6e6894baab
size 887728

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4993b05fb026619eec5eb70db8cadaa041ba4ab92d38b4a387167ace03b1018b
oid sha256:d394d451929b805f2d94f9fc5b12d15c31cfc494df76d7d642b63378b8ba0131
size 247

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bd25d17ef5b7500386761b5e32920879bbdcafe0e17a8a8845628525d861e644
size 10231081
oid sha256:73ddb898f83589b4bcabe978e46e75f20be215492f115bf6ebc98f1d01e1eff8
size 9696507

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5b557acbfeb0681c0a38e47263d945f6cd3a03461298d8b17209c81e3fd0aae8
size 9701371
oid sha256:d3d993977bee96882732d4a9c9d082c356fc9fcd8199c027b016207d60494c2f
size 8957007

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:da8f3b4f9f965da63819652b2c042d4cf7e07d14631113ea072087d56370310e
size 10473741
oid sha256:c9321627184c14af4a6ba64d02e86f7253bc1f563a3adef17036d68480d2bb3e
size 9938178

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a053506017d8a78cfd307b2912eeafa1ac1485a280cf90913985fcc40120b5ec
size 416
oid sha256:88346956fdf58f17dba7b08cc858364ed8278a7baa20febd9c68ae959d2c9c82
size 136

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf148247bf191c7f7e8af738a7b9e147f9ffffeec0e4b9d1c4783c4e384da7eb
size 33
oid sha256:50e40e4c2bb523fca0b54e9a9635281312e9c6f9d757db03c06a0865c5508f29
size 188

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d6d172d1bca02face22ceb4c21ea2b054cf3463025485dce64711b6f36b31f8a
oid sha256:de80d5afc044be903a89ee08f30cfef5fb4c1e928d8ba8f4d81ea9d0bb4fb011
size 4344

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7e5ce817a2c188041f57f8d4c465dab3b9c3e4e1aeb7a9fb270230d1b36df530
oid sha256:79c2a3da1024fa140d23e8438b2756d27cf5db65ac70d7ac4215260b55ca55f8
size 1477064

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4eb2dc373e4ea7d474742590f9073d66a773f6ab94b9e73a8673df19f93fae6d
oid sha256:69435f30146a309c8d7d0eb01216555bf0547095db1fc9c20218d481d6fe62c8
size 247

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d2c55b146fabe78b18c8a28a7746ab56e1ee7a6918e9e3dad9bd196f97975895
size 26158915
oid sha256:3fc89b720dfb7511d5dd9eba31494cf720e6a89519067b7b5a4d65f0a539c811
size 35137505

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:71e1958d77f56843acf1ec48da4f04311a5836c87a0e77dbe26aa47c27c6347e
size 18786848
oid sha256:26b8d97a096aa8a1d686d86fc93bde1dcdd50a9dc273f49f3b6a700fe6610e88
size 20387806

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:20780718399b5759ff9a3a79824986310524793066198e3b9a307222f11a93df
size 17769988
oid sha256:72000be2803259f40da6d093279d17ed194ead3ebc508bf2d77cb463bcb67c4d
size 17594265

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:279916f7689ae46af90e92a46eba9486a71fc762e3e2679ab5441eb37126827b
size 928
oid sha256:fb6de86fee6ff3cc5d61d591fe480a50feb289c05770e3f4b76e24138b571c65
size 136

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf148247bf191c7f7e8af738a7b9e147f9ffffeec0e4b9d1c4783c4e384da7eb
size 33
oid sha256:50e40e4c2bb523fca0b54e9a9635281312e9c6f9d757db03c06a0865c5508f29
size 188

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7a7731051b521694b52b5631470720a7f05331915f4ac4e7f8cd83f9ff459bce
oid sha256:d79027c2513c01a7e360f3177e62ab955e5d3f704f1e7127a6e1e852158ec42c
size 4344

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:99608258e8c9fe5191f1a12edc29b47d307790104149dffb6d3046ddad6aeb1b
oid sha256:0a2c1f98c816728136291fcb7530cd0ebcf4ea47b0f6750836da56b8324d64c1
size 435600

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ae6735b7b394914824e974a7461019373a10f9e2d84ddf834bec8ea268d9ec1e
oid sha256:921505133c62906bd53034a613a827996994875d84c8b26d69d188df9a7ffeba
size 247

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:833e288c5fdacbbe10a5d048cb6f49fe1a396d91b2117b827e130ec11069256a
size 8397615
oid sha256:7e298db7d820e2ff9f0b9c250e800e8ada3521fdeae3c4127452dd62700e9ac8
size 10980189

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2cb870acb4855fef70f19c5f632d94e4c25eef59eeea92f4b1167a44b1b36b33
size 5912007
oid sha256:29b46c2e823d62b1329b98a3d7efffd24fc6c904e9cea115e2f0adb1bb45db44
size 7229025

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8be36298141b455ea51d17a78e4bbc6619639302139fe2db605bdfa3ff5e91bd
size 4794018
oid sha256:f34ddbd109b212260c758d54a0930f75a38666a178a0d26eeefa846cfeac86c0
size 5944469

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:348d0ee38a71929b2017d540de870b9dff6d79efdd0cbc5352fa9697e350134a
size 928
oid sha256:1386f9030607facefe56f429c93e50df0e22017914ce3f21ab67edc87b936d9d
size 136

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf148247bf191c7f7e8af738a7b9e147f9ffffeec0e4b9d1c4783c4e384da7eb
size 33
oid sha256:50e40e4c2bb523fca0b54e9a9635281312e9c6f9d757db03c06a0865c5508f29
size 188

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c5c2996f58d5277fa19cf56ec143334fbee940d1de37530452496a6f0aa11f88
oid sha256:7ffb173891cebb47a4d24d051f5fdd2ec44493d0a1a48d11f4d1410049aadd5b
size 4344

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:da3a8efea9ba60d1fdd209d45a3387df22a09f7c156904ecb03f10456736fb74
oid sha256:ae1760af2d3bf13c6e868643f203e76e1faf81a237715f72f2b81c3199e95e96
size 514056

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b7111ff1ef5c4d6a2990f5f39f42398f061da8c4e81adf46b9d9150ec2feeaf
oid sha256:505a42c408d56c8a7d3e2367280b41e27667b58334f32e84c937c44c38217bd6
size 247

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2ac8c2755d940534042595ecad33ebea358974ec67bc041c8675e53b7d2272ff
size 9182551
oid sha256:1489dac711fb99b192f064f9dbe56ed0e9e80fedc34da469e85acc7d5b4d75bf
size 12316772

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b35aaa37e66dd5563d93e6059d5b645e112e020e03bd398f7098a5289970953a
size 6378566
oid sha256:20edc20184b5e4eb45194016fe7a0a5673665e3105286e0c6563767b5ff461f3
size 6365474

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6138247ba7160a3de6c50111e6fcc5ae075044086d8527ae5d435b1f8a7c7a93
size 6439183
oid sha256:4ccdc96d9fe560a841e45e9fa636b69ef35b518271982339516517a4ae47d04f
size 7449799

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e11c127b1ab12f0761bce6651fa5a4882093924df230294f2f34309bc74b0707
size 672
oid sha256:9ee4f3c571ce6822e157e60133bee02245febee93eba5d35458d3c83345f7b87
size 136

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf148247bf191c7f7e8af738a7b9e147f9ffffeec0e4b9d1c4783c4e384da7eb
size 33
oid sha256:50e40e4c2bb523fca0b54e9a9635281312e9c6f9d757db03c06a0865c5508f29
size 188

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5efab3606f50ee7ac8bb0c88cfeefd86bfd060dbb75d063e01d09456da020026
oid sha256:b05f933aa67d559e44f062c8428b2f85ee7b49d3bf0e0302b9b83fb7d48ed0a3
size 2904

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:724a26cc4a3fc54ea5deb835816afa4a1c9712958ba402cd3067c22f4556a532
oid sha256:8698f98e3fe36e321ba99a9b60facaab4abffb26916042b021adc1b41e8fb877
size 100040

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:71d6ba89bee5a4ee2761220452999e415bc838a44bebf1b5a2e4ba8622369798
oid sha256:c0b18566cbf59e399ea40f1630df12ffbbb9f73bbc733d1d4eba62d675b1fda5
size 247

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:12cd101db746125d40cf2e27c79340a3786c2906feca11a34e380c5d88280d36
size 1329662
oid sha256:5a57aade7d8510ef1cc8778f90cfa86749c95fa0c5a5e80cb166b2edd0f7189a
size 1788513

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:083db9efc5c9e3396c5e1159d020c2a3786f1f1a4b069719d327ed7fbc65c34d
size 33
oid sha256:e7ab5c2bd7d176d4d7902a600240318c2828b7d75f4a888d0887327e4eff089d
size 65

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4500f31e62f0928a837fa71783acacda0db516c7b00d0586a41ea5fd8fc5e772
size 928
oid sha256:4e910eac6a1c94f4c194b05e908dcc973dd4227b18eb80c374d7a1150f166c34
size 136

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf148247bf191c7f7e8af738a7b9e147f9ffffeec0e4b9d1c4783c4e384da7eb
size 33
oid sha256:50e40e4c2bb523fca0b54e9a9635281312e9c6f9d757db03c06a0865c5508f29
size 188

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ae67bab70f3b313427fdcb447ed0a1a3d09581ff7ae8cc64ddd2243ef9ccb6c0
oid sha256:a85e57264325cc0927450e30a85dd0eacb0a70ebdb00c4e2ac043a57f9c200e2
size 2904

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:277340fe2c2ca9f40a2cf27caf66dbb47089d690917a076e341d3be586b874d1
oid sha256:171a9efc9c45601688821936ec9a1dcf91f16b1bbab4e8246f18b4d4cc6ac6ee
size 80432

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e48156ce4f71ac15d78732312fbc7e199f0ecdaac3604231e6be2e3e5b31a0ad
oid sha256:5fd5fe80657788d044cdc8a1baf1456c7695cc951049347a469165002a83c6c7
size 247

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:292e6815ae7431d07ee75a5a770fdc8fd6fe8479eb104c33774ef0049f0dd768
size 963206
oid sha256:cb4810728c3d642326bf5fa2cd1632a60e68880faace4ec7368c6ee7992dabfb
size 1297818

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:083db9efc5c9e3396c5e1159d020c2a3786f1f1a4b069719d327ed7fbc65c34d
size 33
oid sha256:e7ab5c2bd7d176d4d7902a600240318c2828b7d75f4a888d0887327e4eff089d
size 65

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4500f31e62f0928a837fa71783acacda0db516c7b00d0586a41ea5fd8fc5e772
size 928
oid sha256:4e910eac6a1c94f4c194b05e908dcc973dd4227b18eb80c374d7a1150f166c34
size 136

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf148247bf191c7f7e8af738a7b9e147f9ffffeec0e4b9d1c4783c4e384da7eb
size 33
oid sha256:50e40e4c2bb523fca0b54e9a9635281312e9c6f9d757db03c06a0865c5508f29
size 188

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ba10877c155ae24bfe23790b4e13dba9f9fb0b9c11df632fdf4d7032201a01bf
oid sha256:95b34b7eeeba8216732f1977f05054e904393af4987a66634ecb3785b61540cb
size 2904

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9eb1141e9e4ef621d0f07c78a2a858f245e2088e6f0262480f95a8cb9217deba
oid sha256:58680925b02438e0af27f1280df443fd1036d2e5c7f92e9982d4e559c03fe7a2
size 80432

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6ff72fd4f6f61309191a7f2829b73649d836c1ed10f00983093dc68599c92404
oid sha256:bf10e41f2df8c5dc1c19ba8e2d2256ec9d81bdbe28b95079d28039fe2a28504c
size 247

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2590252d39d64c5c939866095c629289f060176799b7cc02b54154afb4b40039
size 988385
oid sha256:ef794082f52304f1b4a7ed6be6e4a44db075e604baa99b1a353cafb52dd7ef0a
size 1305598

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:083db9efc5c9e3396c5e1159d020c2a3786f1f1a4b069719d327ed7fbc65c34d
size 33
oid sha256:e7ab5c2bd7d176d4d7902a600240318c2828b7d75f4a888d0887327e4eff089d
size 65

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4500f31e62f0928a837fa71783acacda0db516c7b00d0586a41ea5fd8fc5e772
size 928
oid sha256:4e910eac6a1c94f4c194b05e908dcc973dd4227b18eb80c374d7a1150f166c34
size 136

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf148247bf191c7f7e8af738a7b9e147f9ffffeec0e4b9d1c4783c4e384da7eb
size 33
oid sha256:50e40e4c2bb523fca0b54e9a9635281312e9c6f9d757db03c06a0865c5508f29
size 188

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:004a428770c0921190ed54dbe586316ca1d39be04a5a34085d219851726ee1df
oid sha256:ddd841d6c38f86a66bc6cae1b94a7a2b20ade873c7b0b6803ce53ad8e57ed7bb
size 2904

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e8847cded62e1861b6128a1b03338d9cd01356a4d71d9ac874dc9371fc2f7dc2
oid sha256:6f861c1477a21509ba5fa68d64bb758e29dfa3d7650d89cd33133f088762493a
size 80432

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4d5bfac4bd22cab6449b24e457719c6598b367f191160335cba81c3b416b1cd5
oid sha256:8f3dc6a1e423b8cd007396a0261664f364d330d350e569a6a0958b834ee1619b
size 247

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f3c20e55b4320b598b84b32abeef4993c9309621de86799f9fa740c76b34489c
size 1014327
oid sha256:5b6dedec5ad69dfe707bdad7123bd7c0fee6013c3bda30265baa166f568f7d7f
size 1426630

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:083db9efc5c9e3396c5e1159d020c2a3786f1f1a4b069719d327ed7fbc65c34d
size 33
oid sha256:e7ab5c2bd7d176d4d7902a600240318c2828b7d75f4a888d0887327e4eff089d
size 65

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a17b1cd612f06662c14e0a74f9bd4787ab97558d57d6e150d06a401a48eccba9
size 912
oid sha256:7a1f252f4880c54e2f6b7ee12b644115b470b8bdfc90c0c6fc7914a5a8e5a785
size 136

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf148247bf191c7f7e8af738a7b9e147f9ffffeec0e4b9d1c4783c4e384da7eb
size 33
oid sha256:50e40e4c2bb523fca0b54e9a9635281312e9c6f9d757db03c06a0865c5508f29
size 188

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f724049d7bd23458f02c606fa59cc1d0f2a44f7fac9e5b4c4eff97c95ac132b3
oid sha256:c7e5083e83c109e2e11d3f4a0428d592dc75d16ab8c5a2c63fab9bc5ecd13bef
size 4208

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:52fb2e9785eb8d069def324272008ca829ebb32830165bdcbbfdfd3cc6bf42dd
oid sha256:2d36e6e644b5e8ad65bb98c4be5ed47c9ecd37c4cbd7f3e0e66817dbcf33e7f2
size 240928

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e06118a224b4477c0aaf17d43930cd7f516f3de932c6d5547ee741f609a6228
oid sha256:722e0ba30e6f4ddce2d31166bfca753bf3c45c507a0f911e1635f0ec2521569e
size 247

Some files were not shown because too many files have changed in this diff.