diff --git a/README.md b/README.md
index 1051c8a6..71990e28 100644
--- a/README.md
+++ b/README.md
@@ -138,7 +138,7 @@ git lfs pull
 
 When adding a new dataset, mock it with
 ```
-python tests/scripts/mock_dataset.py --in-data-dir data/ --out-data-dir tests/data/
+python tests/scripts/mock_dataset.py --in-data-dir data/$DATASET --out-data-dir tests/data/$DATASET
 ```
 
 Run tests
@@ -155,7 +155,9 @@ huggingface-cli login --token $HUGGINGFACE_TOKEN --add-to-git-credential
 
 Then you can upload it to the hub with:
 ```
-HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli upload --repo-type dataset $HF_USER/$DATASET data/$DATASET
+HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli upload $HF_USER/$DATASET data/$DATASET \
+--repo-type dataset \
+--revision v1.0
 ```
 
 For instance, for [cadene/pusht](https://huggingface.co/datasets/cadene/pusht), we used:
@@ -164,6 +166,34 @@ HF_USER=cadene
 DATASET=pusht
 ```
 
+If you want to improve an existing dataset, you can download it locally with:
+```
+mkdir -p data/$DATASET
+HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download $HF_USER/$DATASET \
+--repo-type dataset \
+--local-dir data/$DATASET \
+--local-dir-use-symlinks=False \
+--revision v1.0
+```
+
+Iterate on your code and dataset with:
+```
+DATA_DIR=data python train.py
+```
+
+Then upload a new version:
+```
+HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli upload $HF_USER/$DATASET data/$DATASET \
+--repo-type dataset \
+--revision v1.1 \
+--delete "*"
+```
+
+And you might want to mock the dataset if you need to update the unit tests as well:
+```
+python tests/scripts/mock_dataset.py --in-data-dir data/$DATASET --out-data-dir tests/data/$DATASET
+```
+
 ## Acknowledgment
 
 - Our Diffusion policy and Pusht environment are adapted from [Diffusion Policy](https://diffusion-policy.cs.columbia.edu/)
diff --git a/lerobot/common/datasets/abstract.py b/lerobot/common/datasets/abstract.py
index 34b33c2e..9127d887 100644
--- a/lerobot/common/datasets/abstract.py
+++ b/lerobot/common/datasets/abstract.py
@@ -19,6 +19,7 @@ class AbstractExperienceReplay(TensorDictReplayBuffer):
     def __init__(
         self,
         dataset_id: str,
+        version: str | None = None,
         batch_size: int = None,
         *,
         shuffle: bool = True,
@@ -31,6 +32,7 @@ class AbstractExperienceReplay(TensorDictReplayBuffer):
         transform: "torchrl.envs.Transform" = None,
     ):
         self.dataset_id = dataset_id
+        self.version = version
         self.shuffle = shuffle
         self.root = root
         storage = self._download_or_load_dataset()
@@ -96,10 +98,14 @@ class AbstractExperienceReplay(TensorDictReplayBuffer):
 
     def _download_or_load_dataset(self) -> torch.StorageBase:
         if self.root is None:
-            self.data_dir = Path(snapshot_download(repo_id=f"cadene/{self.dataset_id}", repo_type="dataset"))
+            self.data_dir = Path(
+                snapshot_download(
+                    repo_id=f"cadene/{self.dataset_id}", repo_type="dataset", revision=self.version
+                )
+            )
         else:
             self.data_dir = self.root / self.dataset_id
-        return TensorStorage(TensorDict.load_memmap(self.data_dir))
+        return TensorStorage(TensorDict.load_memmap(self.data_dir / "replay_buffer"))
 
     def _compute_stats(self, num_batch=100, batch_size=32):
         rb = TensorDictReplayBuffer(
diff --git a/lerobot/common/datasets/aloha.py b/lerobot/common/datasets/aloha.py
index 52a5676e..2af98cd8 100644
--- a/lerobot/common/datasets/aloha.py
+++ b/lerobot/common/datasets/aloha.py
@@ -84,6 +84,7 @@ class AlohaExperienceReplay(AbstractExperienceReplay):
     def __init__(
         self,
         dataset_id: str,
+        version: str | None = None,
         batch_size: int = None,
         *,
         shuffle: bool = True,
@@ -99,6 +100,7 @@ class AlohaExperienceReplay(AbstractExperienceReplay):
     ):
         super().__init__(
             dataset_id,
+            version,
             batch_size,
             shuffle=shuffle,
             root=root,
diff --git a/lerobot/common/datasets/pusht.py b/lerobot/common/datasets/pusht.py
index f4f6d9ac..3ad6371f 100644
--- a/lerobot/common/datasets/pusht.py
+++ b/lerobot/common/datasets/pusht.py
@@ -87,6 +87,7 @@ class PushtExperienceReplay(AbstractExperienceReplay):
     def __init__(
         self,
         dataset_id: str,
+        version: str | None = "v1.0",
         batch_size: int = None,
         *,
         shuffle: bool = True,
@@ -100,6 +101,7 @@ class PushtExperienceReplay(AbstractExperienceReplay):
     ):
         super().__init__(
             dataset_id,
+            version,
             batch_size,
             shuffle=shuffle,
             root=root,
diff --git a/lerobot/common/datasets/simxarm.py b/lerobot/common/datasets/simxarm.py
index 7bcb03fb..d7e2e18f 100644
--- a/lerobot/common/datasets/simxarm.py
+++ b/lerobot/common/datasets/simxarm.py
@@ -40,6 +40,7 @@ class SimxarmExperienceReplay(AbstractExperienceReplay):
     def __init__(
         self,
         dataset_id: str,
+        version: str | None = None,
         batch_size: int = None,
         *,
         shuffle: bool = True,
@@ -53,6 +54,7 @@ class SimxarmExperienceReplay(AbstractExperienceReplay):
     ):
         super().__init__(
             dataset_id,
+            version,
             batch_size,
             shuffle=shuffle,
             root=root,
diff --git a/tests/data/pusht/action.memmap b/tests/data/pusht/replay_buffer/action.memmap
similarity index 100%
rename from tests/data/pusht/action.memmap
rename to tests/data/pusht/replay_buffer/action.memmap
diff --git a/tests/data/pusht/episode.memmap b/tests/data/pusht/replay_buffer/episode.memmap
similarity index 100%
rename from tests/data/pusht/episode.memmap
rename to tests/data/pusht/replay_buffer/episode.memmap
diff --git a/tests/data/pusht/frame_id.memmap b/tests/data/pusht/replay_buffer/frame_id.memmap
similarity index 100%
rename from tests/data/pusht/frame_id.memmap
rename to tests/data/pusht/replay_buffer/frame_id.memmap
diff --git a/tests/data/pusht/meta.json b/tests/data/pusht/replay_buffer/meta.json
similarity index 100%
rename from tests/data/pusht/meta.json
rename to tests/data/pusht/replay_buffer/meta.json
diff --git a/tests/data/pusht/next/done.memmap b/tests/data/pusht/replay_buffer/next/done.memmap
similarity index 100%
rename from tests/data/pusht/next/done.memmap
rename to tests/data/pusht/replay_buffer/next/done.memmap
diff --git a/tests/data/pusht/next/meta.json b/tests/data/pusht/replay_buffer/next/meta.json
similarity index 100%
rename from tests/data/pusht/next/meta.json
rename to tests/data/pusht/replay_buffer/next/meta.json
diff --git a/tests/data/pusht/next/observation/image.memmap b/tests/data/pusht/replay_buffer/next/observation/image.memmap
similarity index 100%
rename from tests/data/pusht/next/observation/image.memmap
rename to tests/data/pusht/replay_buffer/next/observation/image.memmap
diff --git a/tests/data/pusht/next/observation/meta.json b/tests/data/pusht/replay_buffer/next/observation/meta.json
similarity index 100%
rename from tests/data/pusht/next/observation/meta.json
rename to tests/data/pusht/replay_buffer/next/observation/meta.json
diff --git a/tests/data/pusht/next/observation/state.memmap b/tests/data/pusht/replay_buffer/next/observation/state.memmap
similarity index 100%
rename from tests/data/pusht/next/observation/state.memmap
rename to tests/data/pusht/replay_buffer/next/observation/state.memmap
diff --git a/tests/data/pusht/next/reward.memmap b/tests/data/pusht/replay_buffer/next/reward.memmap
similarity index 100%
rename from tests/data/pusht/next/reward.memmap
rename to tests/data/pusht/replay_buffer/next/reward.memmap
diff --git a/tests/data/pusht/next/success.memmap b/tests/data/pusht/replay_buffer/next/success.memmap
similarity index 100%
rename from tests/data/pusht/next/success.memmap
rename to tests/data/pusht/replay_buffer/next/success.memmap
diff --git a/tests/data/pusht/observation/image.memmap b/tests/data/pusht/replay_buffer/observation/image.memmap
similarity index 100%
rename from tests/data/pusht/observation/image.memmap
rename to tests/data/pusht/replay_buffer/observation/image.memmap
diff --git a/tests/data/pusht/observation/meta.json b/tests/data/pusht/replay_buffer/observation/meta.json
similarity index 100%
rename from tests/data/pusht/observation/meta.json
rename to tests/data/pusht/replay_buffer/observation/meta.json
diff --git a/tests/data/pusht/observation/state.memmap b/tests/data/pusht/replay_buffer/observation/state.memmap
similarity index 100%
rename from tests/data/pusht/observation/state.memmap
rename to tests/data/pusht/replay_buffer/observation/state.memmap
diff --git a/tests/scripts/mock_dataset.py b/tests/scripts/mock_dataset.py
index c58280d7..2200b644 100644
--- a/tests/scripts/mock_dataset.py
+++ b/tests/scripts/mock_dataset.py
@@ -10,12 +10,15 @@ from pathlib import Path
 
 
 def mock_dataset(in_data_dir, out_data_dir, num_frames=50):
+    in_data_dir = Path(in_data_dir)
+    out_data_dir = Path(out_data_dir)
+
     # load full dataset as a tensor dict
-    in_td_data = TensorDict.load_memmap(in_data_dir)
+    in_td_data = TensorDict.load_memmap(in_data_dir / "replay_buffer")
 
     # use 1 frame to know the specification of the dataset
     # and copy it over `n` frames in the test artifact directory
-    out_td_data = in_td_data[0].expand(num_frames).memmap_like(out_data_dir)
+    out_td_data = in_td_data[0].expand(num_frames).memmap_like(out_data_dir / "replay_buffer")
 
     # copy the first `n` frames so that we have real data
     out_td_data[:num_frames] = in_td_data[:num_frames].clone()
@@ -24,8 +27,8 @@ def mock_dataset(in_data_dir, out_data_dir, num_frames=50):
     out_td_data.lock_()
 
     # copy the full statistics of dataset since it's pretty small
-    in_stats_path = Path(in_data_dir) / "stats.pth"
-    out_stats_path = Path(out_data_dir) / "stats.pth"
+    in_stats_path = in_data_dir / "stats.pth"
+    out_stats_path = out_data_dir / "stats.pth"
 
     shutil.copy(in_stats_path, out_stats_path)
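
For context, here is a minimal sketch (not part of the diff) of how the new `version` argument introduced above would be used to load a dataset pinned to a hub revision. It assumes the default keyword arguments of `PushtExperienceReplay` shown in the diff and the standard torchrl `ReplayBuffer.sample()` behavior; names other than those in the diff are illustrative.
```
from lerobot.common.datasets.pusht import PushtExperienceReplay

# Pin the dataset to the hub revision tagged "v1.0"; passing version="v1.1"
# (or another tag) would make snapshot_download fetch that revision instead.
offline_buffer = PushtExperienceReplay("pusht", version="v1.0", batch_size=32)

# Standard TensorDictReplayBuffer usage: sample a batch of transitions
# loaded from the "replay_buffer" sub-directory of the downloaded snapshot.
batch = offline_buffer.sample()
print(batch["observation", "state"].shape)
```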