diff --git a/examples/1_load_hugging_face_dataset.py b/examples/1_load_hugging_face_dataset.py index f10fb4bd..ec6dd4ec 100644 --- a/examples/1_load_hugging_face_dataset.py +++ b/examples/1_load_hugging_face_dataset.py @@ -11,8 +11,8 @@ This script supports several Hugging Face datasets, among which: 1. [Pusht](https://huggingface.co/datasets/lerobot/pusht) 2. [Xarm Lift Medium](https://huggingface.co/datasets/lerobot/xarm_lift_medium) 3. [Xarm Lift Medium Replay](https://huggingface.co/datasets/lerobot/xarm_lift_medium_replay) -4. [Xarm Lift Medium](https://huggingface.co/datasets/lerobot/xarm_push_medium) -5. [Xarm Lift Medium](https://huggingface.co/datasets/lerobot/xarm_push_medium_replay) +4. [Xarm Push Medium](https://huggingface.co/datasets/lerobot/xarm_push_medium) +5. [Xarm Push Medium Replay](https://huggingface.co/datasets/lerobot/xarm_push_medium_replay) 6. [Aloha Sim Insertion Human](https://huggingface.co/datasets/lerobot/aloha_sim_insertion_human) 7. [Aloha Sim Insertion Scripted](https://huggingface.co/datasets/lerobot/aloha_sim_insertion_scripted) 8. [Aloha Sim Transfer Cube Human](https://huggingface.co/datasets/lerobot/aloha_sim_transfer_cube_human) diff --git a/examples/2_load_lerobot_dataset.py b/examples/2_load_lerobot_dataset.py index 53ad18a2..8bdeb208 100644 --- a/examples/2_load_lerobot_dataset.py +++ b/examples/2_load_lerobot_dataset.py @@ -60,7 +60,7 @@ print(f"keys to access images from cameras: {dataset.image_keys=}") # While the LeRobot dataset adds helpers for working within our library, we still expose the underling Hugging Face dataset. It may be freely replaced or modified in place. Here we use the filtering to keep only frames from episode 5. dataset.hf_dataset = dataset.hf_dataset.filter(lambda frame: frame["episode_index"] == 5) -# LeRobot datsets actually subclass PyTorch datasets. So you can do everything you know and love from working with the latter, for example: iterating through the dataset. 
Here we grap all the image frames. +# LeRobot datasets actually subclass PyTorch datasets. So you can do everything you know and love from working with the latter, for example: iterating through the dataset. Here we grab all the image frames. frames = [sample["observation.image"] for sample in dataset] # but frames are now float32 range [0,1] channel first to follow pytorch convention, diff --git a/lerobot/common/datasets/utils.py b/lerobot/common/datasets/utils.py index 115cdec7..741082ef 100644 --- a/lerobot/common/datasets/utils.py +++ b/lerobot/common/datasets/utils.py @@ -195,7 +195,7 @@ def load_previous_and_future_frames( def get_stats_einops_patterns(hf_dataset): """These einops patterns will be used to aggregate batches and compute statistics. - Note: We assume the images are returned in channel first format + Note: We assume the images of `hf_dataset` are in channel first format """ dataloader = torch.utils.data.DataLoader(