From 772927616af0d7ff3be91f2a9bd63a6fb56b867a Mon Sep 17 00:00:00 2001
From: Remi Cadene
Date: Wed, 22 May 2024 09:12:34 +0000
Subject: [PATCH] fix

---
 .../push_dataset_to_hub/aloha_dora_format.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/lerobot/common/datasets/push_dataset_to_hub/aloha_dora_format.py b/lerobot/common/datasets/push_dataset_to_hub/aloha_dora_format.py
index 6240860b..b7f9f870 100644
--- a/lerobot/common/datasets/push_dataset_to_hub/aloha_dora_format.py
+++ b/lerobot/common/datasets/push_dataset_to_hub/aloha_dora_format.py
@@ -76,15 +76,6 @@ def load_from_raw(raw_dir: Path, out_dir: Path):
     data_df["next.done"] = False
     data_df.loc[data_df.groupby("episode_index").tail(1).index, "next.done"] = True
 
-    # Get the episode index containing for each unique episode index
-    first_ep_index_df = data_df.groupby("episode_index").agg(start_index=("index", "first")).reset_index()
-    from_ = first_ep_index_df["start_index"].tolist()
-    to_ = from_[1:] + [len(data_df)]
-    episode_data_index = {
-        "from": from_,
-        "to": to_,
-    }
-
     data_df["timestamp"] = data_df["timestamp_utc"].map(lambda x: x.timestamp())
     # each episode starts with timestamp 0 to match the ones from the video
     data_df["timestamp"] = data_df.groupby("episode_index")["timestamp"].transform(lambda x: x - x.iloc[0])
@@ -135,6 +126,15 @@ def load_from_raw(raw_dir: Path, out_dir: Path):
     else:
         raise ValueError(key)
 
+    # Get the episode index containing for each unique episode index
+    first_ep_index_df = data_df.groupby("episode_index").agg(start_index=("index", "first")).reset_index()
+    from_ = first_ep_index_df["start_index"].tolist()
+    to_ = from_[1:] + [len(data_df)]
+    episode_data_index = {
+        "from": from_,
+        "to": to_,
+    }
+
     return data_dict, episode_data_index