diff --git a/.dockerignore b/.dockerignore
index b8c1be15..c0d8a84b 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Misc
.git
tmp
@@ -59,7 +73,7 @@ pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
-!tests/data
+!tests/artifacts
htmlcov/
.tox/
.nox/
diff --git a/.gitattributes b/.gitattributes
index 7da36424..44e16cf1 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
*.memmap filter=lfs diff=lfs merge=lfs -text
*.stl filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml
index 7cbed673..2fb23051 100644
--- a/.github/ISSUE_TEMPLATE/bug-report.yml
+++ b/.github/ISSUE_TEMPLATE/bug-report.yml
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
name: "\U0001F41B Bug Report"
description: Submit a bug report to help us improve LeRobot
body:
diff --git a/.github/workflows/build-docker-images.yml b/.github/workflows/build-docker-images.yml
index 3c63fa11..0cb11d57 100644
--- a/.github/workflows/build-docker-images.yml
+++ b/.github/workflows/build-docker-images.yml
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Inspired by
# https://github.com/huggingface/peft/blob/main/.github/workflows/build_docker_images.yml
name: Builds
diff --git a/.github/workflows/nightly-tests.yml b/.github/workflows/nightly-tests.yml
index 210a690c..adac9f20 100644
--- a/.github/workflows/nightly-tests.yml
+++ b/.github/workflows/nightly-tests.yml
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Inspired by
# https://github.com/huggingface/peft/blob/main/.github/workflows/nightly.yml
name: Nightly
diff --git a/.github/workflows/pr_style_bot.yml b/.github/workflows/pr_style_bot.yml
deleted file mode 100644
index 68530645..00000000
--- a/.github/workflows/pr_style_bot.yml
+++ /dev/null
@@ -1,161 +0,0 @@
-# Adapted from https://github.com/huggingface/diffusers/blob/main/.github/workflows/pr_style_bot.yml
-name: PR Style Bot
-
-on:
- issue_comment:
- types: [created]
-
-permissions: {}
-
-env:
- PYTHON_VERSION: "3.10"
-
-jobs:
- check-permissions:
- if: >
- contains(github.event.comment.body, '@bot /style') &&
- github.event.issue.pull_request != null
- runs-on: ubuntu-latest
- outputs:
- is_authorized: ${{ steps.check_user_permission.outputs.has_permission }}
- steps:
- - name: Check user permission
- id: check_user_permission
- uses: actions/github-script@v6
- with:
- script: |
- const comment_user = context.payload.comment.user.login;
- const { data: permission } = await github.rest.repos.getCollaboratorPermissionLevel({
- owner: context.repo.owner,
- repo: context.repo.repo,
- username: comment_user
- });
-
- const authorized =
- permission.permission === 'admin' ||
- permission.permission === 'write';
-
- console.log(
- `User ${comment_user} has permission level: ${permission.permission}, ` +
- `authorized: ${authorized} (admins & maintainers allowed)`
- );
-
- core.setOutput('has_permission', authorized);
-
- run-style-bot:
- needs: check-permissions
- if: needs.check-permissions.outputs.is_authorized == 'true'
- runs-on: ubuntu-latest
- permissions:
- contents: write
- pull-requests: write
- steps:
- - name: Extract PR details
- id: pr_info
- uses: actions/github-script@v6
- with:
- script: |
- const prNumber = context.payload.issue.number;
- const { data: pr } = await github.rest.pulls.get({
- owner: context.repo.owner,
- repo: context.repo.repo,
- pull_number: prNumber
- });
-
- // We capture both the branch ref and the "full_name" of the head repo
- // so that we can check out the correct repository & branch (including forks).
- core.setOutput("prNumber", prNumber);
- core.setOutput("headRef", pr.head.ref);
- core.setOutput("headRepoFullName", pr.head.repo.full_name);
-
- - name: Check out PR branch
- uses: actions/checkout@v4
- env:
- HEADREPOFULLNAME: ${{ steps.pr_info.outputs.headRepoFullName }}
- HEADREF: ${{ steps.pr_info.outputs.headRef }}
- with:
- persist-credentials: true
- # Instead of checking out the base repo, use the contributor's repo name
- repository: ${{ env.HEADREPOFULLNAME }}
- ref: ${{ env.HEADREF }}
- # You may need fetch-depth: 0 for being able to push
- fetch-depth: 0
- token: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Debug
- env:
- HEADREPOFULLNAME: ${{ steps.pr_info.outputs.headRepoFullName }}
- HEADREF: ${{ steps.pr_info.outputs.headRef }}
- PRNUMBER: ${{ steps.pr_info.outputs.prNumber }}
- run: |
- echo "PR number: ${PRNUMBER}"
- echo "Head Ref: ${HEADREF}"
- echo "Head Repo Full Name: ${HEADREPOFULLNAME}"
-
- - name: Set up Python
- uses: actions/setup-python@v4
- with:
- python-version: ${{ env.PYTHON_VERSION }}
-
- - name: Get Ruff Version from pre-commit-config.yaml
- id: get-ruff-version
- run: |
- RUFF_VERSION=$(awk '/repo: https:\/\/github.com\/astral-sh\/ruff-pre-commit/{flag=1;next}/rev:/{if(flag){print $2;exit}}' .pre-commit-config.yaml)
- echo "ruff_version=${RUFF_VERSION}" >> $GITHUB_OUTPUT
-
- - name: Install Ruff
- env:
- RUFF_VERSION: ${{ steps.get-ruff-version.outputs.ruff_version }}
- run: python -m pip install "ruff==${RUFF_VERSION}"
-
- - name: Ruff check
- run: ruff check --fix
-
- - name: Ruff format
- run: ruff format
-
- - name: Commit and push changes
- id: commit_and_push
- env:
- HEADREPOFULLNAME: ${{ steps.pr_info.outputs.headRepoFullName }}
- HEADREF: ${{ steps.pr_info.outputs.headRef }}
- PRNUMBER: ${{ steps.pr_info.outputs.prNumber }}
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- echo "HEADREPOFULLNAME: ${HEADREPOFULLNAME}, HEADREF: ${HEADREF}"
- # Configure git with the Actions bot user
- git config user.name "github-actions[bot]"
- git config user.email "github-actions[bot]@users.noreply.github.com"
- git config --local lfs.https://github.com/.locksverify false
-
- # Make sure your 'origin' remote is set to the contributor's fork
- git remote set-url origin "https://x-access-token:${GITHUB_TOKEN}@github.com/${HEADREPOFULLNAME}.git"
-
- # If there are changes after running style/quality, commit them
- if [ -n "$(git status --porcelain)" ]; then
- git add .
- git commit -m "Apply style fixes"
- # Push to the original contributor's forked branch
- git push origin HEAD:${HEADREF}
- echo "changes_pushed=true" >> $GITHUB_OUTPUT
- else
- echo "No changes to commit."
- echo "changes_pushed=false" >> $GITHUB_OUTPUT
- fi
-
- - name: Comment on PR with workflow run link
- if: steps.commit_and_push.outputs.changes_pushed == 'true'
- uses: actions/github-script@v6
- with:
- script: |
- const prNumber = parseInt(process.env.prNumber, 10);
- const runUrl = `${process.env.GITHUB_SERVER_URL}/${process.env.GITHUB_REPOSITORY}/actions/runs/${process.env.GITHUB_RUN_ID}`
-
- await github.rest.issues.createComment({
- owner: context.repo.owner,
- repo: context.repo.repo,
- issue_number: prNumber,
- body: `Style fixes have been applied. [View the workflow run here](${runUrl}).`
- });
- env:
- prNumber: ${{ steps.pr_info.outputs.prNumber }}
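For context, the deleted workflow pinned Ruff to the version recorded in `.pre-commit-config.yaml` by scraping the `rev:` line under the `astral-sh/ruff-pre-commit` repo with the awk one-liner above. A rough Python equivalent of that lookup (a sketch only; assumes PyYAML is available and the standard pre-commit layout):

```python
# Sketch: read the pinned Ruff version from .pre-commit-config.yaml
# (same idea as the removed awk one-liner; not part of this patch).
import yaml  # assumption: PyYAML is installed

with open(".pre-commit-config.yaml") as f:
    config = yaml.safe_load(f)

ruff_rev = next(
    repo["rev"]
    for repo in config["repos"]
    if isinstance(repo.get("repo"), str) and repo["repo"].endswith("astral-sh/ruff-pre-commit")
)
print(ruff_rev)  # e.g. "v0.9.10", matching the rev pinned further below
```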
diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml
index f785d52f..332b543c 100644
--- a/.github/workflows/quality.yml
+++ b/.github/workflows/quality.yml
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
name: Quality
on:
diff --git a/.github/workflows/test-docker-build.yml b/.github/workflows/test-docker-build.yml
index 3ee84a27..e77c570e 100644
--- a/.github/workflows/test-docker-build.yml
+++ b/.github/workflows/test-docker-build.yml
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Inspired by
# https://github.com/huggingface/peft/blob/main/.github/workflows/test-docker-build.yml
name: Test Dockerfiles
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 9c3f5756..d91c5364 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
name: Tests
on:
@@ -112,7 +126,7 @@ jobs:
# portaudio19-dev is needed to install pyaudio
run: |
sudo apt-get update && \
- sudo apt-get install -y libegl1-mesa-dev portaudio19-dev
+ sudo apt-get install -y libegl1-mesa-dev ffmpeg portaudio19-dev
- name: Install uv and python
uses: astral-sh/setup-uv@v5
diff --git a/.github/workflows/trufflehog.yml b/.github/workflows/trufflehog.yml
index 487ccea5..166e0590 100644
--- a/.github/workflows/trufflehog.yml
+++ b/.github/workflows/trufflehog.yml
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
on:
push:
diff --git a/.gitignore b/.gitignore
index 0a0ffe10..d6c51c90 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Logging
logs
tmp
@@ -64,7 +78,7 @@ pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
-!tests/data
+!tests/artifacts
htmlcov/
.tox/
.nox/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index dba87705..e699f543 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,7 +1,28 @@
-exclude: ^(tests/data)
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exclude: "tests/artifacts/.*\\.safetensors$"
default_language_version:
python: python3.10
repos:
+ ##### Meta #####
+ - repo: meta
+ hooks:
+ - id: check-useless-excludes
+ - id: check-hooks-apply
+
+
##### Style / Misc. #####
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
@@ -14,31 +35,37 @@ repos:
- id: check-toml
- id: end-of-file-fixer
- id: trailing-whitespace
+
- repo: https://github.com/crate-ci/typos
- rev: v1.30.0
+ rev: v1.30.2
hooks:
- id: typos
args: [--force-exclude]
+
- repo: https://github.com/asottile/pyupgrade
rev: v3.19.1
hooks:
- id: pyupgrade
+
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.9.9
+ rev: v0.9.10
hooks:
- id: ruff
args: [--fix]
- id: ruff-format
+
##### Security #####
- repo: https://github.com/gitleaks/gitleaks
rev: v8.24.0
hooks:
- id: gitleaks
+
- repo: https://github.com/woodruffw/zizmor-pre-commit
rev: v1.4.1
hooks:
- id: zizmor
+
- repo: https://github.com/PyCQA/bandit
rev: 1.8.3
hooks:
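The pre-commit `exclude` value is a Python regular expression matched against each file path, so the tightened pattern only skips `.safetensors` artifacts while everything else under `tests/artifacts` still runs through the hooks. A small sketch of what it matches (the paths are made up for illustration):

```python
# Sketch: what the new pre-commit `exclude` pattern does and does not skip
# (hypothetical file paths, shown only for illustration).
import re

exclude = re.compile(r"tests/artifacts/.*\.safetensors$")

print(bool(exclude.search("tests/artifacts/act/model.safetensors")))  # True  -> excluded from hooks
print(bool(exclude.search("tests/artifacts/act/stats.json")))         # False -> still checked
```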
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 8aff26d8..a9e4a856 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -291,7 +291,7 @@ sudo apt-get install git-lfs
git lfs install
```
-Pull artifacts if they're not in [tests/data](tests/data)
+Pull artifacts if they're not in [tests/artifacts](tests/artifacts)
```bash
git lfs pull
```
diff --git a/Makefile b/Makefile
index 772da320..c82483cc 100644
--- a/Makefile
+++ b/Makefile
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
.PHONY: tests
PYTHON_PATH := $(shell which python)
@@ -33,6 +47,7 @@ test-act-ete-train:
--policy.dim_model=64 \
--policy.n_action_steps=20 \
--policy.chunk_size=20 \
+ --policy.device=$(DEVICE) \
--env.type=aloha \
--env.episode_length=5 \
--dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
@@ -47,7 +62,6 @@ test-act-ete-train:
--save_checkpoint=true \
--log_freq=1 \
--wandb.enable=false \
- --device=$(DEVICE) \
--output_dir=tests/outputs/act/
test-act-ete-train-resume:
@@ -58,11 +72,11 @@ test-act-ete-train-resume:
test-act-ete-eval:
python lerobot/scripts/eval.py \
--policy.path=tests/outputs/act/checkpoints/000004/pretrained_model \
+ --policy.device=$(DEVICE) \
--env.type=aloha \
--env.episode_length=5 \
--eval.n_episodes=1 \
- --eval.batch_size=1 \
- --device=$(DEVICE)
+ --eval.batch_size=1
test-diffusion-ete-train:
python lerobot/scripts/train.py \
@@ -70,6 +84,7 @@ test-diffusion-ete-train:
--policy.down_dims='[64,128,256]' \
--policy.diffusion_step_embed_dim=32 \
--policy.num_inference_steps=10 \
+ --policy.device=$(DEVICE) \
--env.type=pusht \
--env.episode_length=5 \
--dataset.repo_id=lerobot/pusht \
@@ -84,21 +99,21 @@ test-diffusion-ete-train:
--save_freq=2 \
--log_freq=1 \
--wandb.enable=false \
- --device=$(DEVICE) \
--output_dir=tests/outputs/diffusion/
test-diffusion-ete-eval:
python lerobot/scripts/eval.py \
--policy.path=tests/outputs/diffusion/checkpoints/000002/pretrained_model \
+ --policy.device=$(DEVICE) \
--env.type=pusht \
--env.episode_length=5 \
--eval.n_episodes=1 \
- --eval.batch_size=1 \
- --device=$(DEVICE)
+ --eval.batch_size=1
test-tdmpc-ete-train:
python lerobot/scripts/train.py \
--policy.type=tdmpc \
+ --policy.device=$(DEVICE) \
--env.type=xarm \
--env.task=XarmLift-v0 \
--env.episode_length=5 \
@@ -114,15 +129,14 @@ test-tdmpc-ete-train:
--save_freq=2 \
--log_freq=1 \
--wandb.enable=false \
- --device=$(DEVICE) \
--output_dir=tests/outputs/tdmpc/
test-tdmpc-ete-eval:
python lerobot/scripts/eval.py \
--policy.path=tests/outputs/tdmpc/checkpoints/000002/pretrained_model \
+ --policy.device=$(DEVICE) \
--env.type=xarm \
--env.episode_length=5 \
--env.task=XarmLift-v0 \
--eval.n_episodes=1 \
- --eval.batch_size=1 \
- --device=$(DEVICE)
+ --eval.batch_size=1
diff --git a/README.md b/README.md
index 9debf9d1..effbb08b 100644
--- a/README.md
+++ b/README.md
@@ -232,8 +232,8 @@ python lerobot/scripts/eval.py \
--env.type=pusht \
--eval.batch_size=10 \
--eval.n_episodes=10 \
- --use_amp=false \
- --device=cuda
+ --policy.use_amp=false \
+ --policy.device=cuda
```
Note: After training your own policy, you can re-evaluate the checkpoints with:
@@ -384,3 +384,6 @@ Additionally, if you are using any of the particular policy architecture, pretra
year={2024}
}
```
+## Star History
+
+[](https://star-history.com/#huggingface/lerobot&Timeline)
diff --git a/benchmarks/video/run_video_benchmark.py b/benchmarks/video/run_video_benchmark.py
index e9066487..c62578c4 100644
--- a/benchmarks/video/run_video_benchmark.py
+++ b/benchmarks/video/run_video_benchmark.py
@@ -67,7 +67,7 @@ def parse_int_or_none(value) -> int | None:
def check_datasets_formats(repo_ids: list) -> None:
for repo_id in repo_ids:
dataset = LeRobotDataset(repo_id)
- if dataset.video:
+ if len(dataset.meta.video_keys) > 0:
raise ValueError(
f"Use only image dataset for running this benchmark. Video dataset provided: {repo_id}"
)
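The benchmark guard above now keys off the dataset metadata instead of a `dataset.video` attribute: a dataset counts as a video dataset when `meta.video_keys` is non-empty. A minimal sketch of the same check in isolation (the repo id is only an example):

```python
# Sketch: metadata-based video check mirroring the updated guard (example repo id).
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

dataset = LeRobotDataset("lerobot/pusht")
is_video_dataset = len(dataset.meta.video_keys) > 0
print(is_video_dataset)  # the benchmark raises a ValueError when this is True
```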
diff --git a/examples/10_use_so100.md b/examples/10_use_so100.md
index 23da1eab..d2423229 100644
--- a/examples/10_use_so100.md
+++ b/examples/10_use_so100.md
@@ -99,22 +99,22 @@ Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem5
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
-Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
+Remove the usb cable from your MotorsBus and press Enter when done.
[...Disconnect leader arm and press Enter...]
-The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
+The port of this MotorsBus is /dev/tty.usbmodem575E0031751
Reconnect the usb cable.
```
Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
-Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
+Remove the usb cable from your MotorsBus and press Enter when done.
[...Disconnect follower arm and press Enter...]
-The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
+The port of this MotorsBus is /dev/tty.usbmodem575E0032081
Reconnect the usb cable.
```
@@ -454,8 +454,8 @@ Next, you'll need to calibrate your SO-100 robot to ensure that the leader and f
You will need to move the follower arm to these positions sequentially:
-| 1. Zero position | 2. Rotated position | 3. Rest position |
-|---|---|---|
+| 1. Zero position | 2. Rotated position | 3. Rest position |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
|
|
|
Make sure both arms are connected and run this script to launch manual calibration:
@@ -470,8 +470,8 @@ python lerobot/scripts/control_robot.py \
#### b. Manual calibration of leader arm
Follow step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:
-| 1. Zero position | 2. Rotated position | 3. Rest position |
-|---|---|---|
+| 1. Zero position | 2. Rotated position | 3. Rest position |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
|
|
|
Run this script to launch manual calibration:
@@ -571,18 +571,25 @@ python lerobot/scripts/train.py \
--policy.type=act \
--output_dir=outputs/train/act_so100_test \
--job_name=act_so100_test \
- --device=cuda \
+ --policy.device=cuda \
--wandb.enable=true
```
Let's explain it:
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so100_test`.
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
-4. We provided `device=cuda` since we are training on a Nvidia GPU, but you could use `device=mps` to train on Apple silicon.
+4. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
Training should take several hours. You will find checkpoints in `outputs/train/act_so100_test/checkpoints`.
+To resume training from a checkpoint, here is an example command that resumes from the `last` checkpoint of the `act_so100_test` policy:
+```bash
+python lerobot/scripts/train.py \
+ --config_path=outputs/train/act_so100_test/checkpoints/last/pretrained_model/train_config.json \
+ --resume=true
+```
+
## K. Evaluate your policy
You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
diff --git a/examples/11_use_lekiwi.md b/examples/11_use_lekiwi.md
index 4b811417..dc310af2 100644
--- a/examples/11_use_lekiwi.md
+++ b/examples/11_use_lekiwi.md
@@ -366,8 +366,8 @@ Now we have to calibrate the leader arm and the follower arm. The wheel motors d
You will need to move the follower arm to these positions sequentially:
-| 1. Zero position | 2. Rotated position | 3. Rest position |
-|---|---|---|
+| 1. Zero position | 2. Rotated position | 3. Rest position |
+| ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
|
|
Make sure the arm is connected to the Raspberry Pi and run this script (on the Raspberry Pi) to launch manual calibration:
@@ -385,8 +385,8 @@ If you have the **wired** LeKiwi version please run all commands including this
### Calibrate leader arm
Then to calibrate the leader arm (which is attached to the laptop/pc). You will need to move the leader arm to these positions sequentially:
-| 1. Zero position | 2. Rotated position | 3. Rest position |
-|---|---|---|
+| 1. Zero position | 2. Rotated position | 3. Rest position |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
|
|
|
Run this script (on your laptop/pc) to launch manual calibration:
@@ -416,22 +416,22 @@ python lerobot/scripts/control_robot.py \
You should see on your laptop something like this: ```[INFO] Connected to remote robot at tcp://172.17.133.91:5555 and video stream at tcp://172.17.133.91:5556.``` Now you can move the leader arm and use the keyboard (w,a,s,d) to drive forward, left, backwards, right. And use (z,x) to turn left or turn right. You can use (r,f) to increase and decrease the speed of the mobile robot. There are three speed modes, see the table below:
| Speed Mode | Linear Speed (m/s) | Rotation Speed (deg/s) |
-|------------|-------------------|-----------------------|
-| Fast | 0.4 | 90 |
-| Medium | 0.25 | 60 |
-| Slow | 0.1 | 30 |
+| ---------- | ------------------ | ---------------------- |
+| Fast | 0.4 | 90 |
+| Medium | 0.25 | 60 |
+| Slow | 0.1 | 30 |
-| Key | Action |
-|------|--------------------------------|
-| W | Move forward |
-| A | Move left |
-| S | Move backward |
-| D | Move right |
-| Z | Turn left |
-| X | Turn right |
-| R | Increase speed |
-| F | Decrease speed |
+| Key | Action |
+| --- | -------------- |
+| W | Move forward |
+| A | Move left |
+| S | Move backward |
+| D | Move right |
+| Z | Turn left |
+| X | Turn right |
+| R | Increase speed |
+| F | Decrease speed |
> [!TIP]
> If you use a different keyboard you can change the keys for each command in the [`LeKiwiRobotConfig`](../lerobot/common/robot_devices/robots/configs.py).
@@ -549,14 +549,14 @@ python lerobot/scripts/train.py \
--policy.type=act \
--output_dir=outputs/train/act_lekiwi_test \
--job_name=act_lekiwi_test \
- --device=cuda \
+ --policy.device=cuda \
--wandb.enable=true
```
Let's explain it:
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/lekiwi_test`.
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
-4. We provided `device=cuda` since we are training on a Nvidia GPU, but you could use `device=mps` to train on Apple silicon.
+4. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
Training should take several hours. You will find checkpoints in `outputs/train/act_lekiwi_test/checkpoints`.
diff --git a/examples/11_use_moss.md b/examples/11_use_moss.md
index 67f8157e..d2e02076 100644
--- a/examples/11_use_moss.md
+++ b/examples/11_use_moss.md
@@ -176,8 +176,8 @@ Next, you'll need to calibrate your Moss v1 robot to ensure that the leader and
You will need to move the follower arm to these positions sequentially:
-| 1. Zero position | 2. Rotated position | 3. Rest position |
-|---|---|---|
+| 1. Zero position | 2. Rotated position | 3. Rest position |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
|
|
Make sure both arms are connected and run this script to launch manual calibration:
@@ -192,8 +192,8 @@ python lerobot/scripts/control_robot.py \
**Manual calibration of leader arm**
Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:
-| 1. Zero position | 2. Rotated position | 3. Rest position |
-|---|---|---|
+| 1. Zero position | 2. Rotated position | 3. Rest position |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
|
|
Run this script to launch manual calibration:
@@ -293,14 +293,14 @@ python lerobot/scripts/train.py \
--policy.type=act \
--output_dir=outputs/train/act_moss_test \
--job_name=act_moss_test \
- --device=cuda \
+ --policy.device=cuda \
--wandb.enable=true
```
Let's explain it:
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/moss_test`.
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
-4. We provided `device=cuda` since we are training on a Nvidia GPU, but you could use `device=mps` to train on Apple silicon.
+4. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
Training should take several hours. You will find checkpoints in `outputs/train/act_moss_test/checkpoints`.
diff --git a/examples/1_load_lerobot_dataset.py b/examples/1_load_lerobot_dataset.py
index 96c104b6..c374a375 100644
--- a/examples/1_load_lerobot_dataset.py
+++ b/examples/1_load_lerobot_dataset.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""
This script demonstrates the use of `LeRobotDataset` class for handling and processing robotic datasets from Hugging Face.
It illustrates how to load datasets, manipulate them, and apply transformations suitable for machine learning tasks in PyTorch.
diff --git a/examples/2_evaluate_pretrained_policy.py b/examples/2_evaluate_pretrained_policy.py
index 0a7b8deb..edbbad38 100644
--- a/examples/2_evaluate_pretrained_policy.py
+++ b/examples/2_evaluate_pretrained_policy.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""
This script demonstrates how to evaluate a pretrained policy from the HuggingFace Hub or from your local
training outputs directory. In the latter case, you might want to run examples/3_train_policy.py first.
@@ -30,7 +44,7 @@ pretrained_policy_path = "lerobot/diffusion_pusht"
# OR a path to a local outputs/train folder.
# pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")
-policy = DiffusionPolicy.from_pretrained(pretrained_policy_path, map_location=device)
+policy = DiffusionPolicy.from_pretrained(pretrained_policy_path)
# Initialize evaluation environment to render two observation types:
# an image of the scene and state/position of the agent. The environment
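Because `map_location` is no longer forwarded to `from_pretrained`, the example loads the checkpoint first and handles device placement separately. A minimal sketch of that pattern (assuming the `lerobot/diffusion_pusht` checkpoint used in this example; CUDA is optional):

```python
# Sketch: load the pretrained policy, then move it to the chosen device.
import torch
from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
policy = DiffusionPolicy.from_pretrained("lerobot/diffusion_pusht")
policy.to(device)
policy.eval()
```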
diff --git a/examples/3_train_policy.py b/examples/3_train_policy.py
index f6eabbfa..6c3af54e 100644
--- a/examples/3_train_policy.py
+++ b/examples/3_train_policy.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""This scripts demonstrates how to train Diffusion Policy on the PushT environment.
Once you have trained a model with this script, you can try to evaluate it on
diff --git a/examples/4_train_policy_with_script.md b/examples/4_train_policy_with_script.md
index 58ed239a..b23d2271 100644
--- a/examples/4_train_policy_with_script.md
+++ b/examples/4_train_policy_with_script.md
@@ -1,5 +1,5 @@
This tutorial will explain the training script, how to use it, and particularly how to configure everything needed for the training run.
-> **Note:** The following assume you're running these commands on a machine equipped with a cuda GPU. If you don't have one (or if you're using a Mac), you can add `--device=cpu` (`--device=mps` respectively). However, be advised that the code executes much slower on cpu.
+> **Note:** The following assumes you're running these commands on a machine equipped with a cuda GPU. If you don't have one (or if you're using a Mac), you can add `--policy.device=cpu` (`--policy.device=mps` respectively). However, be advised that the code executes much slower on cpu.
## The training script
diff --git a/examples/7_get_started_with_real_robot.md b/examples/7_get_started_with_real_robot.md
index f4bef69c..894c44a3 100644
--- a/examples/7_get_started_with_real_robot.md
+++ b/examples/7_get_started_with_real_robot.md
@@ -386,14 +386,14 @@ When you connect your robot for the first time, the [`ManipulatorRobot`](../lero
Here are the positions you'll move the follower arm to:
-| 1. Zero position | 2. Rotated position | 3. Rest position |
-|---|---|---|
+| 1. Zero position | 2. Rotated position | 3. Rest position |
+| ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
|
|
And here are the corresponding positions for the leader arm:
-| 1. Zero position | 2. Rotated position | 3. Rest position |
-|---|---|---|
+| 1. Zero position | 2. Rotated position | 3. Rest position |
+| ----------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
|
|
You can watch a [video tutorial of the calibration procedure](https://youtu.be/8drnU9uRY24) for more details.
@@ -898,14 +898,14 @@ python lerobot/scripts/train.py \
--policy.type=act \
--output_dir=outputs/train/act_koch_test \
--job_name=act_koch_test \
- --device=cuda \
+ --policy.device=cuda \
--wandb.enable=true
```
Let's explain it:
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/koch_test`.
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
-4. We provided `device=cuda` since we are training on a Nvidia GPU, but you could use `device=mps` to train on Apple silicon.
+4. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
For more information on the `train` script see the previous tutorial: [`examples/4_train_policy_with_script.md`](../examples/4_train_policy_with_script.md)
diff --git a/examples/9_use_aloha.md b/examples/9_use_aloha.md
index 055551f0..62dee588 100644
--- a/examples/9_use_aloha.md
+++ b/examples/9_use_aloha.md
@@ -135,14 +135,14 @@ python lerobot/scripts/train.py \
--policy.type=act \
--output_dir=outputs/train/act_aloha_test \
--job_name=act_aloha_test \
- --device=cuda \
+ --policy.device=cuda \
--wandb.enable=true
```
Let's explain it:
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/aloha_test`.
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
-4. We provided `device=cuda` since we are training on a Nvidia GPU, but you could use `device=mps` to train on Apple silicon.
+4. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
For more information on the `train` script see the previous tutorial: [`examples/4_train_policy_with_script.md`](../examples/4_train_policy_with_script.md)
diff --git a/examples/advanced/1_add_image_transforms.py b/examples/advanced/1_add_image_transforms.py
index 882710e3..f1460926 100644
--- a/examples/advanced/1_add_image_transforms.py
+++ b/examples/advanced/1_add_image_transforms.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""
This script demonstrates how to use torchvision's image transformation with LeRobotDataset for data
augmentation purposes. The transformations are passed to the dataset as an argument upon creation, and
diff --git a/examples/advanced/2_calculate_validation_loss.py b/examples/advanced/2_calculate_validation_loss.py
index 6f234719..47b4dd02 100644
--- a/examples/advanced/2_calculate_validation_loss.py
+++ b/examples/advanced/2_calculate_validation_loss.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""This script demonstrates how to slice a dataset and calculate the loss on a subset of the data.
This technique can be useful for debugging and testing purposes, as well as identifying whether a policy
diff --git a/examples/port_datasets/pusht_zarr.py b/examples/port_datasets/pusht_zarr.py
index eac6f63d..ea2e8b60 100644
--- a/examples/port_datasets/pusht_zarr.py
+++ b/examples/port_datasets/pusht_zarr.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import shutil
from pathlib import Path
diff --git a/lerobot/common/constants.py b/lerobot/common/constants.py
index d0c9845a..973595cd 100644
--- a/lerobot/common/constants.py
+++ b/lerobot/common/constants.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
# keys
import os
from pathlib import Path
diff --git a/lerobot/common/datasets/backward_compatibility.py b/lerobot/common/datasets/backward_compatibility.py
index d1b8926a..cf8e31c4 100644
--- a/lerobot/common/datasets/backward_compatibility.py
+++ b/lerobot/common/datasets/backward_compatibility.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import packaging.version
V2_MESSAGE = """
diff --git a/lerobot/common/datasets/lerobot_dataset.py b/lerobot/common/datasets/lerobot_dataset.py
index 5414c76d..101e71f4 100644
--- a/lerobot/common/datasets/lerobot_dataset.py
+++ b/lerobot/common/datasets/lerobot_dataset.py
@@ -67,7 +67,7 @@ from lerobot.common.datasets.utils import (
)
from lerobot.common.datasets.video_utils import (
VideoFrame,
- decode_video_frames_torchvision,
+ decode_video_frames,
encode_video_frames,
get_video_info,
)
@@ -462,8 +462,8 @@ class LeRobotDataset(torch.utils.data.Dataset):
download_videos (bool, optional): Flag to download the videos. Note that when set to True but the
video files are already present on local disk, they won't be downloaded again. Defaults to
True.
- video_backend (str | None, optional): Video backend to use for decoding videos. There is currently
- a single option which is the pyav decoder used by Torchvision. Defaults to pyav.
+ video_backend (str | None, optional): Video backend to use for decoding videos. Defaults to torchcodec.
+ You can also use 'pyav', the Torchvision decoder that used to be the default, or 'video_reader', another Torchvision decoder.
"""
super().__init__()
self.repo_id = repo_id
@@ -473,7 +473,7 @@ class LeRobotDataset(torch.utils.data.Dataset):
self.episodes = episodes
self.tolerance_s = tolerance_s
self.revision = revision if revision else CODEBASE_VERSION
- self.video_backend = video_backend if video_backend else "pyav"
+ self.video_backend = video_backend if video_backend else "torchcodec"
self.delta_indices = None
# Unused attributes
@@ -707,9 +707,7 @@ class LeRobotDataset(torch.utils.data.Dataset):
item = {}
for vid_key, query_ts in query_timestamps.items():
video_path = self.root / self.meta.get_video_file_path(ep_idx, vid_key)
- frames = decode_video_frames_torchvision(
- video_path, query_ts, self.tolerance_s, self.video_backend
- )
+ frames = decode_video_frames(video_path, query_ts, self.tolerance_s, self.video_backend)
item[vid_key] = frames.squeeze(0)
return item
@@ -1029,7 +1027,7 @@ class LeRobotDataset(torch.utils.data.Dataset):
obj.delta_timestamps = None
obj.delta_indices = None
obj.episode_data_index = None
- obj.video_backend = video_backend if video_backend is not None else "pyav"
+ obj.video_backend = video_backend if video_backend is not None else "torchcodec"
return obj
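With `torchcodec` now the default `video_backend`, the previous behavior remains available by passing the backend explicitly at construction time. A minimal sketch, assuming the dataset is on the Hub and falling back to `pyav` when `torchcodec` is not installed:

```python
# Sketch: pick the video decoding backend explicitly (falls back to the old default).
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

try:
    import torchcodec  # noqa: F401  # new default backend
    backend = "torchcodec"
except ImportError:
    backend = "pyav"  # previous default, decoded through Torchvision

dataset = LeRobotDataset("lerobot/aloha_sim_transfer_cube_human", video_backend=backend)
print(dataset.video_backend)
```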
diff --git a/lerobot/common/datasets/v21/_remove_language_instruction.py b/lerobot/common/datasets/v21/_remove_language_instruction.py
index dd4604cf..643ddd3f 100644
--- a/lerobot/common/datasets/v21/_remove_language_instruction.py
+++ b/lerobot/common/datasets/v21/_remove_language_instruction.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import logging
import traceback
from pathlib import Path
diff --git a/lerobot/common/datasets/v21/convert_dataset_v20_to_v21.py b/lerobot/common/datasets/v21/convert_dataset_v20_to_v21.py
index 163a6003..176d16d0 100644
--- a/lerobot/common/datasets/v21/convert_dataset_v20_to_v21.py
+++ b/lerobot/common/datasets/v21/convert_dataset_v20_to_v21.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""
This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 2.0 to
2.1. It will:
diff --git a/lerobot/common/datasets/v21/convert_stats.py b/lerobot/common/datasets/v21/convert_stats.py
index cbf584b7..4a20b427 100644
--- a/lerobot/common/datasets/v21/convert_stats.py
+++ b/lerobot/common/datasets/v21/convert_stats.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
diff --git a/lerobot/common/datasets/video_utils.py b/lerobot/common/datasets/video_utils.py
index 9f043f96..3fe19d8b 100644
--- a/lerobot/common/datasets/video_utils.py
+++ b/lerobot/common/datasets/video_utils.py
@@ -27,6 +27,35 @@ import torch
import torchvision
from datasets.features.features import register_feature
from PIL import Image
+from torchcodec.decoders import VideoDecoder
+
+
+def decode_video_frames(
+ video_path: Path | str,
+ timestamps: list[float],
+ tolerance_s: float,
+ backend: str = "torchcodec",
+) -> torch.Tensor:
+ """
+ Decodes video frames using the specified backend.
+
+ Args:
+ video_path (Path): Path to the video file.
+ timestamps (list[float]): List of timestamps to extract frames.
+ tolerance_s (float): Allowed deviation in seconds for frame retrieval.
+ backend (str, optional): Backend to use for decoding. Defaults to "torchcodec".
+
+ Returns:
+ torch.Tensor: Decoded frames.
+
+ Currently supports torchcodec on cpu and pyav.
+ """
+ if backend == "torchcodec":
+ return decode_video_frames_torchcodec(video_path, timestamps, tolerance_s)
+ elif backend in ["pyav", "video_reader"]:
+ return decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend)
+ else:
+ raise ValueError(f"Unsupported video backend: {backend}")
def decode_video_frames_torchvision(
@@ -127,6 +156,75 @@ def decode_video_frames_torchvision(
return closest_frames
+def decode_video_frames_torchcodec(
+ video_path: Path | str,
+ timestamps: list[float],
+ tolerance_s: float,
+ device: str = "cpu",
+ log_loaded_timestamps: bool = False,
+) -> torch.Tensor:
+ """Loads frames associated with the requested timestamps of a video using torchcodec.
+
+ Note: Setting device="cuda" outside the main process, e.g. in data loader workers, will lead to CUDA initialization errors.
+
+ Note: Video benefits from inter-frame compression. Instead of storing every frame individually,
+ the encoder stores a reference frame (or a key frame) and subsequent frames as differences relative to
+ that key frame. As a consequence, to access a requested frame, we need to load the preceding key frame,
+ and all subsequent frames until reaching the requested frame. The number of key frames in a video
+ can be adjusted during encoding to take into account decoding time and video size in bytes.
+ """
+ # initialize video decoder
+ decoder = VideoDecoder(video_path, device=device, seek_mode="approximate")
+ loaded_frames = []
+ loaded_ts = []
+ # get metadata for frame information
+ metadata = decoder.metadata
+ average_fps = metadata.average_fps
+
+ # convert timestamps to frame indices
+ frame_indices = [round(ts * average_fps) for ts in timestamps]
+
+ # retrieve frames based on indices
+ frames_batch = decoder.get_frames_at(indices=frame_indices)
+
+ for frame, pts in zip(frames_batch.data, frames_batch.pts_seconds, strict=False):
+ loaded_frames.append(frame)
+ loaded_ts.append(pts.item())
+ if log_loaded_timestamps:
+ logging.info(f"Frame loaded at timestamp={pts:.4f}")
+
+ query_ts = torch.tensor(timestamps)
+ loaded_ts = torch.tensor(loaded_ts)
+
+ # compute distances between each query timestamp and loaded timestamps
+ dist = torch.cdist(query_ts[:, None], loaded_ts[:, None], p=1)
+ min_, argmin_ = dist.min(1)
+
+ is_within_tol = min_ < tolerance_s
+ assert is_within_tol.all(), (
+ f"One or several query timestamps unexpectedly violate the tolerance ({min_[~is_within_tol]} > {tolerance_s=})."
+ "It means that the closest frame that can be loaded from the video is too far away in time."
+ "This might be due to synchronization issues with timestamps during data collection."
+ "To be safe, we advise to ignore this item during training."
+ f"\nqueried timestamps: {query_ts}"
+ f"\nloaded timestamps: {loaded_ts}"
+ f"\nvideo: {video_path}"
+ )
+
+ # get closest frames to the query timestamps
+ closest_frames = torch.stack([loaded_frames[idx] for idx in argmin_])
+ closest_ts = loaded_ts[argmin_]
+
+ if log_loaded_timestamps:
+ logging.info(f"{closest_ts=}")
+
+ # convert to float32 in [0,1] range (channel first)
+ closest_frames = closest_frames.type(torch.float32) / 255
+
+ assert len(timestamps) == len(closest_frames)
+ return closest_frames
+
+
def encode_video_frames(
imgs_dir: Path | str,
video_path: Path | str,
diff --git a/lerobot/common/envs/__init__.py b/lerobot/common/envs/__init__.py
index a583ffc5..4977d11d 100644
--- a/lerobot/common/envs/__init__.py
+++ b/lerobot/common/envs/__init__.py
@@ -1 +1,15 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from .configs import AlohaEnv, EnvConfig, PushtEnv, XarmEnv # noqa: F401
diff --git a/lerobot/common/envs/configs.py b/lerobot/common/envs/configs.py
index 6259ca94..cf90048a 100644
--- a/lerobot/common/envs/configs.py
+++ b/lerobot/common/envs/configs.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import abc
from dataclasses import dataclass, field
diff --git a/lerobot/common/optim/__init__.py b/lerobot/common/optim/__init__.py
index e1e65966..de2c4c99 100644
--- a/lerobot/common/optim/__init__.py
+++ b/lerobot/common/optim/__init__.py
@@ -1 +1,15 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from .optimizers import OptimizerConfig as OptimizerConfig
diff --git a/lerobot/common/policies/__init__.py b/lerobot/common/policies/__init__.py
index d212ef7e..00e28269 100644
--- a/lerobot/common/policies/__init__.py
+++ b/lerobot/common/policies/__init__.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from .act.configuration_act import ACTConfig as ACTConfig
from .dexvla.configuration_dexvla import DexVLAConfig as DexVLAConfig
from .diffusion.configuration_diffusion import DiffusionConfig as DiffusionConfig
diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py
index 299877a6..249ea8cd 100644
--- a/lerobot/common/policies/factory.py
+++ b/lerobot/common/policies/factory.py
@@ -16,7 +16,6 @@
import logging
-import torch
from torch import nn
from lerobot.common.datasets.lerobot_dataset import LeRobotDatasetMetadata
@@ -83,7 +82,6 @@ def make_policy_config(policy_type: str, **kwargs) -> PreTrainedConfig:
def make_policy(
cfg: PreTrainedConfig,
- device: str | torch.device,
ds_meta: LeRobotDatasetMetadata | None = None,
env_cfg: EnvConfig | None = None,
) -> PreTrainedPolicy:
@@ -95,7 +93,6 @@ def make_policy(
Args:
cfg (PreTrainedConfig): The config of the policy to make. If `pretrained_path` is set, the policy will
be loaded with the weights from that path.
- device (str): the device to load the policy onto.
ds_meta (LeRobotDatasetMetadata | None, optional): Dataset metadata to take input/output shapes and
statistics to use for (un)normalization of inputs/outputs in the policy. Defaults to None.
env_cfg (EnvConfig | None, optional): The config of a gym environment to parse features from. Must be
@@ -103,7 +100,7 @@ def make_policy(
Raises:
ValueError: Either ds_meta or env and env_cfg must be provided.
- NotImplementedError: if the policy.type is 'vqbet' and the device 'mps' (due to an incompatibility)
+        NotImplementedError: if the policy.type is 'vqbet' and the policy device is 'mps' (due to an incompatibility)
Returns:
PreTrainedPolicy: _description_
@@ -118,7 +115,7 @@ def make_policy(
# https://github.com/pytorch/pytorch/issues/77764. As a temporary fix, you can set the environment
# variable `PYTORCH_ENABLE_MPS_FALLBACK=1` to use the CPU as a fallback for this op. WARNING: this will be
# slower than running natively on MPS.
- if cfg.type == "vqbet" and str(device) == "mps":
+ if cfg.type == "vqbet" and cfg.device == "mps":
raise NotImplementedError(
"Current implementation of VQBeT does not support `mps` backend. "
"Please use `cpu` or `cuda` backend."
@@ -152,7 +149,7 @@ def make_policy(
# Make a fresh policy.
policy = policy_cls(**kwargs)
- policy.to(device)
+ policy.to(cfg.device)
assert isinstance(policy, nn.Module)
# policy = torch.compile(policy, mode="reduce-overhead")
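+    # Callers now select the device through the policy config rather than a separate `device` argument.
+    # Illustrative sketch (assumes the config accepts a `device` kwarg and `dataset` is a LeRobotDataset):
+    #   policy = make_policy(make_policy_config("act", device="cuda"), ds_meta=dataset.meta)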
diff --git a/lerobot/common/policies/pi0/configuration_pi0.py b/lerobot/common/policies/pi0/configuration_pi0.py
index 8d2eedf6..8c7cc130 100644
--- a/lerobot/common/policies/pi0/configuration_pi0.py
+++ b/lerobot/common/policies/pi0/configuration_pi0.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from dataclasses import dataclass, field
from lerobot.common.optim.optimizers import AdamWConfig
@@ -76,6 +90,7 @@ class PI0Config(PreTrainedConfig):
def __post_init__(self):
super().__post_init__()
+ # TODO(Steven): Validate device and amp? in all policy configs?
"""Input validation (not exhaustive)."""
if self.n_action_steps > self.chunk_size:
raise ValueError(
diff --git a/lerobot/common/policies/pi0/conversion_scripts/benchmark.py b/lerobot/common/policies/pi0/conversion_scripts/benchmark.py
index 31bd1b66..cb3c0e9b 100644
--- a/lerobot/common/policies/pi0/conversion_scripts/benchmark.py
+++ b/lerobot/common/policies/pi0/conversion_scripts/benchmark.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import torch
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
@@ -31,7 +45,7 @@ def main():
cfg = PreTrainedConfig.from_pretrained(ckpt_torch_dir)
cfg.pretrained_path = ckpt_torch_dir
- policy = make_policy(cfg, device, ds_meta=dataset.meta)
+ policy = make_policy(cfg, ds_meta=dataset.meta)
# policy = torch.compile(policy, mode="reduce-overhead")
diff --git a/lerobot/common/policies/pi0/conversion_scripts/compare_with_jax.py b/lerobot/common/policies/pi0/conversion_scripts/compare_with_jax.py
index 8b2e1c66..6bd7c91f 100644
--- a/lerobot/common/policies/pi0/conversion_scripts/compare_with_jax.py
+++ b/lerobot/common/policies/pi0/conversion_scripts/compare_with_jax.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import json
import pickle
from pathlib import Path
@@ -87,7 +101,7 @@ def main():
cfg = PreTrainedConfig.from_pretrained(ckpt_torch_dir)
cfg.pretrained_path = ckpt_torch_dir
- policy = make_policy(cfg, device, dataset_meta)
+ policy = make_policy(cfg, dataset_meta)
# loss_dict = policy.forward(batch, noise=noise, time=time_beta)
# loss_dict["loss"].backward()
diff --git a/lerobot/common/policies/pi0/conversion_scripts/conversion_utils.py b/lerobot/common/policies/pi0/conversion_scripts/conversion_utils.py
index 8e35d0d4..8835da31 100644
--- a/lerobot/common/policies/pi0/conversion_scripts/conversion_utils.py
+++ b/lerobot/common/policies/pi0/conversion_scripts/conversion_utils.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from transformers import GemmaConfig, PaliGemmaConfig
diff --git a/lerobot/common/policies/pi0/conversion_scripts/convert_pi0_to_hf_lerobot.py b/lerobot/common/policies/pi0/conversion_scripts/convert_pi0_to_hf_lerobot.py
index dd8622dd..73ff506f 100644
--- a/lerobot/common/policies/pi0/conversion_scripts/convert_pi0_to_hf_lerobot.py
+++ b/lerobot/common/policies/pi0/conversion_scripts/convert_pi0_to_hf_lerobot.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""
Convert pi0 parameters from Jax to Pytorch
diff --git a/lerobot/common/policies/pi0/flex_attention.py b/lerobot/common/policies/pi0/flex_attention.py
index 38a5b597..35628cdd 100644
--- a/lerobot/common/policies/pi0/flex_attention.py
+++ b/lerobot/common/policies/pi0/flex_attention.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import torch
import torch.nn.functional as F # noqa: N812
from packaging.version import Version
diff --git a/lerobot/common/policies/pi0/paligemma_with_expert.py b/lerobot/common/policies/pi0/paligemma_with_expert.py
index 08c36c11..76e2ce60 100644
--- a/lerobot/common/policies/pi0/paligemma_with_expert.py
+++ b/lerobot/common/policies/pi0/paligemma_with_expert.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from typing import List, Optional, Union
import torch
diff --git a/lerobot/common/policies/pretrained.py b/lerobot/common/policies/pretrained.py
index 1729dfb0..da4ef157 100644
--- a/lerobot/common/policies/pretrained.py
+++ b/lerobot/common/policies/pretrained.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
import abc
import logging
import os
@@ -73,7 +86,6 @@ class PreTrainedPolicy(nn.Module, HubMixin, abc.ABC):
cache_dir: str | Path | None = None,
local_files_only: bool = False,
revision: str | None = None,
- map_location: str = "cpu",
strict: bool = False,
**kwargs,
) -> T:
@@ -98,7 +110,7 @@ class PreTrainedPolicy(nn.Module, HubMixin, abc.ABC):
if os.path.isdir(model_id):
print("Loading weights from local directory")
model_file = os.path.join(model_id, SAFETENSORS_SINGLE_FILE)
- policy = cls._load_as_safetensor(instance, model_file, map_location, strict)
+ policy = cls._load_as_safetensor(instance, model_file, config.device, strict)
else:
try:
model_file = hf_hub_download(
@@ -112,13 +124,13 @@ class PreTrainedPolicy(nn.Module, HubMixin, abc.ABC):
token=token,
local_files_only=local_files_only,
)
- policy = cls._load_as_safetensor(instance, model_file, map_location, strict)
+ policy = cls._load_as_safetensor(instance, model_file, config.device, strict)
except HfHubHTTPError as e:
raise FileNotFoundError(
f"{SAFETENSORS_SINGLE_FILE} not found on the HuggingFace Hub in {model_id}"
) from e
- policy.to(map_location)
+ policy.to(config.device)
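+    # Weights are now loaded onto the device declared in the policy config (formerly the `map_location` argument).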
policy.eval()
return policy
diff --git a/lerobot/common/robot_devices/cameras/configs.py b/lerobot/common/robot_devices/cameras/configs.py
index 6acdbd3e..013419a9 100644
--- a/lerobot/common/robot_devices/cameras/configs.py
+++ b/lerobot/common/robot_devices/cameras/configs.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import abc
from dataclasses import dataclass
diff --git a/lerobot/common/robot_devices/cameras/intelrealsense.py b/lerobot/common/robot_devices/cameras/intelrealsense.py
index 7e65dba9..7a21661a 100644
--- a/lerobot/common/robot_devices/cameras/intelrealsense.py
+++ b/lerobot/common/robot_devices/cameras/intelrealsense.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""
This file contains utilities for recording frames from Intel Realsense cameras.
"""
@@ -34,7 +48,7 @@ def find_cameras(raise_when_empty=True, mock=False) -> list[dict]:
connected to the computer.
"""
if mock:
- import tests.mock_pyrealsense2 as rs
+ import tests.cameras.mock_pyrealsense2 as rs
else:
import pyrealsense2 as rs
@@ -86,7 +100,7 @@ def save_images_from_cameras(
serial_numbers = [cam["serial_number"] for cam in camera_infos]
if mock:
- import tests.mock_cv2 as cv2
+ import tests.cameras.mock_cv2 as cv2
else:
import cv2
@@ -100,7 +114,7 @@ def save_images_from_cameras(
camera = IntelRealSenseCamera(config)
camera.connect()
print(
- f"IntelRealSenseCamera({camera.serial_number}, fps={camera.fps}, width={camera.width}, height={camera.height}, color_mode={camera.color_mode})"
+ f"IntelRealSenseCamera({camera.serial_number}, fps={camera.fps}, width={camera.capture_width}, height={camera.capture_height}, color_mode={camera.color_mode})"
)
cameras.append(camera)
@@ -210,9 +224,20 @@ class IntelRealSenseCamera:
self.serial_number = self.find_serial_number_from_name(config.name)
else:
self.serial_number = config.serial_number
+
+ # Store the raw (capture) resolution from the config.
+ self.capture_width = config.width
+ self.capture_height = config.height
+
+ # If rotated by ±90, swap width and height.
+ if config.rotation in [-90, 90]:
+ self.width = config.height
+ self.height = config.width
+ else:
+ self.width = config.width
+ self.height = config.height
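+        # e.g. a 640x480 capture rotated by ±90° is exposed downstream as width=480, height=640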
+
self.fps = config.fps
- self.width = config.width
- self.height = config.height
self.channels = config.channels
self.color_mode = config.color_mode
self.use_depth = config.use_depth
@@ -228,11 +253,10 @@ class IntelRealSenseCamera:
self.logs = {}
if self.mock:
- import tests.mock_cv2 as cv2
+ import tests.cameras.mock_cv2 as cv2
else:
import cv2
- # TODO(alibets): Do we keep original width/height or do we define them after rotation?
self.rotation = None
if config.rotation == -90:
self.rotation = cv2.ROTATE_90_COUNTERCLOCKWISE
@@ -263,22 +287,26 @@ class IntelRealSenseCamera:
)
if self.mock:
- import tests.mock_pyrealsense2 as rs
+ import tests.cameras.mock_pyrealsense2 as rs
else:
import pyrealsense2 as rs
config = rs.config()
config.enable_device(str(self.serial_number))
- if self.fps and self.width and self.height:
+ if self.fps and self.capture_width and self.capture_height:
# TODO(rcadene): can we set rgb8 directly?
- config.enable_stream(rs.stream.color, self.width, self.height, rs.format.rgb8, self.fps)
+ config.enable_stream(
+ rs.stream.color, self.capture_width, self.capture_height, rs.format.rgb8, self.fps
+ )
else:
config.enable_stream(rs.stream.color)
if self.use_depth:
- if self.fps and self.width and self.height:
- config.enable_stream(rs.stream.depth, self.width, self.height, rs.format.z16, self.fps)
+ if self.fps and self.capture_width and self.capture_height:
+ config.enable_stream(
+ rs.stream.depth, self.capture_width, self.capture_height, rs.format.z16, self.fps
+ )
else:
config.enable_stream(rs.stream.depth)
@@ -316,18 +344,18 @@ class IntelRealSenseCamera:
raise OSError(
f"Can't set {self.fps=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_fps}."
)
- if self.width is not None and self.width != actual_width:
+ if self.capture_width is not None and self.capture_width != actual_width:
raise OSError(
- f"Can't set {self.width=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_width}."
+ f"Can't set {self.capture_width=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_width}."
)
- if self.height is not None and self.height != actual_height:
+ if self.capture_height is not None and self.capture_height != actual_height:
raise OSError(
- f"Can't set {self.height=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_height}."
+ f"Can't set {self.capture_height=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_height}."
)
self.fps = round(actual_fps)
- self.width = round(actual_width)
- self.height = round(actual_height)
+ self.capture_width = round(actual_width)
+ self.capture_height = round(actual_height)
self.is_connected = True
@@ -347,7 +375,7 @@ class IntelRealSenseCamera:
)
if self.mock:
- import tests.mock_cv2 as cv2
+ import tests.cameras.mock_cv2 as cv2
else:
import cv2
@@ -373,7 +401,7 @@ class IntelRealSenseCamera:
color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
h, w, _ = color_image.shape
- if h != self.height or w != self.width:
+ if h != self.capture_height or w != self.capture_width:
raise OSError(
f"Can't capture color image with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
)
@@ -395,7 +423,7 @@ class IntelRealSenseCamera:
depth_map = np.asanyarray(depth_frame.get_data())
h, w = depth_map.shape
- if h != self.height or w != self.width:
+ if h != self.capture_height or w != self.capture_width:
raise OSError(
f"Can't capture depth map with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
)
diff --git a/lerobot/common/robot_devices/cameras/opencv.py b/lerobot/common/robot_devices/cameras/opencv.py
index 93c791fa..f279f315 100644
--- a/lerobot/common/robot_devices/cameras/opencv.py
+++ b/lerobot/common/robot_devices/cameras/opencv.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""
This file contains utilities for recording frames from cameras. For more info look at `OpenCVCamera` docstring.
"""
@@ -66,7 +80,7 @@ def _find_cameras(
possible_camera_ids: list[int | str], raise_when_empty=False, mock=False
) -> list[int | str]:
if mock:
- import tests.mock_cv2 as cv2
+ import tests.cameras.mock_cv2 as cv2
else:
import cv2
@@ -130,8 +144,8 @@ def save_images_from_cameras(
camera = OpenCVCamera(config)
camera.connect()
print(
- f"OpenCVCamera({camera.camera_index}, fps={camera.fps}, width={camera.width}, "
- f"height={camera.height}, color_mode={camera.color_mode})"
+ f"OpenCVCamera({camera.camera_index}, fps={camera.fps}, width={camera.capture_width}, "
+ f"height={camera.capture_height}, color_mode={camera.color_mode})"
)
cameras.append(camera)
@@ -230,9 +244,19 @@ class OpenCVCamera:
else:
raise ValueError(f"Please check the provided camera_index: {self.camera_index}")
+ # Store the raw (capture) resolution from the config.
+ self.capture_width = config.width
+ self.capture_height = config.height
+
+ # If rotated by ±90, swap width and height.
+ if config.rotation in [-90, 90]:
+ self.width = config.height
+ self.height = config.width
+ else:
+ self.width = config.width
+ self.height = config.height
+
self.fps = config.fps
- self.width = config.width
- self.height = config.height
self.channels = config.channels
self.color_mode = config.color_mode
self.mock = config.mock
@@ -245,11 +269,10 @@ class OpenCVCamera:
self.logs = {}
if self.mock:
- import tests.mock_cv2 as cv2
+ import tests.cameras.mock_cv2 as cv2
else:
import cv2
- # TODO(aliberts): Do we keep original width/height or do we define them after rotation?
self.rotation = None
if config.rotation == -90:
self.rotation = cv2.ROTATE_90_COUNTERCLOCKWISE
@@ -263,7 +286,7 @@ class OpenCVCamera:
raise RobotDeviceAlreadyConnectedError(f"OpenCVCamera({self.camera_index}) is already connected.")
if self.mock:
- import tests.mock_cv2 as cv2
+ import tests.cameras.mock_cv2 as cv2
else:
import cv2
@@ -271,10 +294,20 @@ class OpenCVCamera:
# when other threads are used to save the images.
cv2.setNumThreads(1)
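+            # Pick an OS-specific capture backend (V4L2 on Linux, DirectShow on Windows,
+            # AVFoundation on macOS); fall back to CAP_ANY elsewhere.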
+ backend = (
+ cv2.CAP_V4L2
+ if platform.system() == "Linux"
+ else cv2.CAP_DSHOW
+ if platform.system() == "Windows"
+ else cv2.CAP_AVFOUNDATION
+ if platform.system() == "Darwin"
+ else cv2.CAP_ANY
+ )
+
camera_idx = f"/dev/video{self.camera_index}" if platform.system() == "Linux" else self.camera_index
# First create a temporary camera trying to access `camera_index`,
# and verify it is a valid camera by calling `isOpened`.
- tmp_camera = cv2.VideoCapture(camera_idx)
+ tmp_camera = cv2.VideoCapture(camera_idx, backend)
is_camera_open = tmp_camera.isOpened()
# Release camera to make it accessible for `find_camera_indices`
tmp_camera.release()
@@ -297,14 +330,14 @@ class OpenCVCamera:
# Secondly, create the camera that will be used downstream.
# Note: For some unknown reason, calling `isOpened` blocks the camera which then
# needs to be re-created.
- self.camera = cv2.VideoCapture(camera_idx)
+ self.camera = cv2.VideoCapture(camera_idx, backend)
if self.fps is not None:
self.camera.set(cv2.CAP_PROP_FPS, self.fps)
- if self.width is not None:
- self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
- if self.height is not None:
- self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
+ if self.capture_width is not None:
+ self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, self.capture_width)
+ if self.capture_height is not None:
+ self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, self.capture_height)
actual_fps = self.camera.get(cv2.CAP_PROP_FPS)
actual_width = self.camera.get(cv2.CAP_PROP_FRAME_WIDTH)
@@ -316,19 +349,22 @@ class OpenCVCamera:
raise OSError(
f"Can't set {self.fps=} for OpenCVCamera({self.camera_index}). Actual value is {actual_fps}."
)
- if self.width is not None and not math.isclose(self.width, actual_width, rel_tol=1e-3):
+ if self.capture_width is not None and not math.isclose(
+ self.capture_width, actual_width, rel_tol=1e-3
+ ):
raise OSError(
- f"Can't set {self.width=} for OpenCVCamera({self.camera_index}). Actual value is {actual_width}."
+ f"Can't set {self.capture_width=} for OpenCVCamera({self.camera_index}). Actual value is {actual_width}."
)
- if self.height is not None and not math.isclose(self.height, actual_height, rel_tol=1e-3):
+ if self.capture_height is not None and not math.isclose(
+ self.capture_height, actual_height, rel_tol=1e-3
+ ):
raise OSError(
- f"Can't set {self.height=} for OpenCVCamera({self.camera_index}). Actual value is {actual_height}."
+ f"Can't set {self.capture_height=} for OpenCVCamera({self.camera_index}). Actual value is {actual_height}."
)
self.fps = round(actual_fps)
- self.width = round(actual_width)
- self.height = round(actual_height)
-
+ self.capture_width = round(actual_width)
+ self.capture_height = round(actual_height)
self.is_connected = True
def read(self, temporary_color_mode: str | None = None) -> np.ndarray:
@@ -362,14 +398,14 @@ class OpenCVCamera:
# so we convert the image color from BGR to RGB.
if requested_color_mode == "rgb":
if self.mock:
- import tests.mock_cv2 as cv2
+ import tests.cameras.mock_cv2 as cv2
else:
import cv2
color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
h, w, _ = color_image.shape
- if h != self.height or w != self.width:
+ if h != self.capture_height or w != self.capture_width:
raise OSError(
f"Can't capture color image with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
)
diff --git a/lerobot/common/robot_devices/cameras/utils.py b/lerobot/common/robot_devices/cameras/utils.py
index 88288ea3..c6431646 100644
--- a/lerobot/common/robot_devices/cameras/utils.py
+++ b/lerobot/common/robot_devices/cameras/utils.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from typing import Protocol
import numpy as np
@@ -31,7 +45,7 @@ def make_cameras_from_configs(camera_configs: dict[str, CameraConfig]) -> list[C
cameras[key] = IntelRealSenseCamera(cfg)
else:
- raise ValueError(f"The motor type '{cfg.type}' is not valid.")
+ raise ValueError(f"The camera type '{cfg.type}' is not valid.")
return cameras
diff --git a/lerobot/common/robot_devices/control_configs.py b/lerobot/common/robot_devices/control_configs.py
index 2ef1b44b..0ecd8683 100644
--- a/lerobot/common/robot_devices/control_configs.py
+++ b/lerobot/common/robot_devices/control_configs.py
@@ -1,14 +1,25 @@
-import logging
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from dataclasses import dataclass
from pathlib import Path
import draccus
from lerobot.common.robot_devices.robots.configs import RobotConfig
-from lerobot.common.utils.utils import auto_select_torch_device, is_amp_available, is_torch_device_available
from lerobot.configs import parser
from lerobot.configs.policies import PreTrainedConfig
-from lerobot.configs.train import TrainPipelineConfig
@dataclass
@@ -43,11 +54,6 @@ class RecordControlConfig(ControlConfig):
# Root directory where the dataset will be stored (e.g. 'dataset/path').
root: str | Path | None = None
policy: PreTrainedConfig | None = None
- # TODO(rcadene, aliberts): By default, use device and use_amp values from policy checkpoint.
- device: str | None = None # cuda | cpu | mps
- # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP,
- # automatic gradient scaling is used.
- use_amp: bool | None = None
# Limit the frames per second. By default, uses the policy fps.
fps: int | None = None
# Number of seconds before starting data collection. It allows the robot devices to warmup and synchronize.
@@ -90,27 +96,6 @@ class RecordControlConfig(ControlConfig):
self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides)
self.policy.pretrained_path = policy_path
- # When no device or use_amp are given, use the one from training config.
- if self.device is None or self.use_amp is None:
- train_cfg = TrainPipelineConfig.from_pretrained(policy_path)
- if self.device is None:
- self.device = train_cfg.device
- if self.use_amp is None:
- self.use_amp = train_cfg.use_amp
-
- # Automatically switch to available device if necessary
- if not is_torch_device_available(self.device):
- auto_device = auto_select_torch_device()
- logging.warning(f"Device '{self.device}' is not available. Switching to '{auto_device}'.")
- self.device = auto_device
-
- # Automatically deactivate AMP if necessary
- if self.use_amp and not is_amp_available(self.device):
- logging.warning(
- f"Automatic Mixed Precision (amp) is not available on device '{self.device}'. Deactivating AMP."
- )
- self.use_amp = False
-
@ControlConfig.register_subclass("replay")
@dataclass
diff --git a/lerobot/common/robot_devices/control_utils.py b/lerobot/common/robot_devices/control_utils.py
index d2361a64..78a8c6a6 100644
--- a/lerobot/common/robot_devices/control_utils.py
+++ b/lerobot/common/robot_devices/control_utils.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
########################################################################################
# Utilities
########################################################################################
@@ -18,6 +32,7 @@ from termcolor import colored
from lerobot.common.datasets.image_writer import safe_stop_image_writer
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.datasets.utils import get_features_from_robot
+from lerobot.common.policies.pretrained import PreTrainedPolicy
from lerobot.common.robot_devices.robots.utils import Robot
from lerobot.common.robot_devices.utils import busy_wait
from lerobot.common.utils.utils import get_safe_torch_device, has_method
@@ -179,8 +194,6 @@ def record_episode(
episode_time_s,
display_cameras,
policy,
- device,
- use_amp,
fps,
single_task,
):
@@ -191,8 +204,6 @@ def record_episode(
dataset=dataset,
events=events,
policy=policy,
- device=device,
- use_amp=use_amp,
fps=fps,
teleoperate=policy is None,
single_task=single_task,
@@ -207,9 +218,7 @@ def control_loop(
display_cameras=False,
dataset: LeRobotDataset | None = None,
events=None,
- policy=None,
- device: torch.device | str | None = None,
- use_amp: bool | None = None,
+    policy: PreTrainedPolicy | None = None,
fps: int | None = None,
single_task: str | None = None,
):
@@ -232,9 +241,6 @@ def control_loop(
if dataset is not None and fps is not None and dataset.fps != fps:
raise ValueError(f"The dataset fps should be equal to requested fps ({dataset['fps']} != {fps}).")
- if isinstance(device, str):
- device = get_safe_torch_device(device)
-
timestamp = 0
start_episode_t = time.perf_counter()
while timestamp < control_time_s:
@@ -246,7 +252,9 @@ def control_loop(
observation = robot.capture_observation()
if policy is not None:
- pred_action = predict_action(observation, policy, device, use_amp)
+ pred_action = predict_action(
+ observation, policy, get_safe_torch_device(policy.config.device), policy.config.use_amp
+ )
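+            # the device and AMP flag now come from the policy's own config instead of control_loop arguments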
# Action can eventually be clipped using `max_relative_target`,
# so action actually sent is saved in the dataset.
action = robot.send_action(pred_action)
diff --git a/lerobot/common/robot_devices/motors/configs.py b/lerobot/common/robot_devices/motors/configs.py
index 37b781f9..0bfbaf83 100644
--- a/lerobot/common/robot_devices/motors/configs.py
+++ b/lerobot/common/robot_devices/motors/configs.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import abc
from dataclasses import dataclass
diff --git a/lerobot/common/robot_devices/motors/dynamixel.py b/lerobot/common/robot_devices/motors/dynamixel.py
index 17ea933d..6096ceb5 100644
--- a/lerobot/common/robot_devices/motors/dynamixel.py
+++ b/lerobot/common/robot_devices/motors/dynamixel.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import enum
import logging
import math
@@ -318,7 +332,7 @@ class DynamixelMotorsBus:
)
if self.mock:
- import tests.mock_dynamixel_sdk as dxl
+ import tests.motors.mock_dynamixel_sdk as dxl
else:
import dynamixel_sdk as dxl
@@ -342,7 +356,7 @@ class DynamixelMotorsBus:
def reconnect(self):
if self.mock:
- import tests.mock_dynamixel_sdk as dxl
+ import tests.motors.mock_dynamixel_sdk as dxl
else:
import dynamixel_sdk as dxl
@@ -632,7 +646,7 @@ class DynamixelMotorsBus:
def read_with_motor_ids(self, motor_models, motor_ids, data_name, num_retry=NUM_READ_RETRY):
if self.mock:
- import tests.mock_dynamixel_sdk as dxl
+ import tests.motors.mock_dynamixel_sdk as dxl
else:
import dynamixel_sdk as dxl
@@ -677,7 +691,7 @@ class DynamixelMotorsBus:
start_time = time.perf_counter()
if self.mock:
- import tests.mock_dynamixel_sdk as dxl
+ import tests.motors.mock_dynamixel_sdk as dxl
else:
import dynamixel_sdk as dxl
@@ -743,7 +757,7 @@ class DynamixelMotorsBus:
def write_with_motor_ids(self, motor_models, motor_ids, data_name, values, num_retry=NUM_WRITE_RETRY):
if self.mock:
- import tests.mock_dynamixel_sdk as dxl
+ import tests.motors.mock_dynamixel_sdk as dxl
else:
import dynamixel_sdk as dxl
@@ -779,7 +793,7 @@ class DynamixelMotorsBus:
start_time = time.perf_counter()
if self.mock:
- import tests.mock_dynamixel_sdk as dxl
+ import tests.motors.mock_dynamixel_sdk as dxl
else:
import dynamixel_sdk as dxl
diff --git a/lerobot/common/robot_devices/motors/feetech.py b/lerobot/common/robot_devices/motors/feetech.py
index cec36d37..64c7f413 100644
--- a/lerobot/common/robot_devices/motors/feetech.py
+++ b/lerobot/common/robot_devices/motors/feetech.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import enum
import logging
import math
@@ -299,7 +313,7 @@ class FeetechMotorsBus:
)
if self.mock:
- import tests.mock_scservo_sdk as scs
+ import tests.motors.mock_scservo_sdk as scs
else:
import scservo_sdk as scs
@@ -323,7 +337,7 @@ class FeetechMotorsBus:
def reconnect(self):
if self.mock:
- import tests.mock_scservo_sdk as scs
+ import tests.motors.mock_scservo_sdk as scs
else:
import scservo_sdk as scs
@@ -650,7 +664,7 @@ class FeetechMotorsBus:
def read_with_motor_ids(self, motor_models, motor_ids, data_name, num_retry=NUM_READ_RETRY):
if self.mock:
- import tests.mock_scservo_sdk as scs
+ import tests.motors.mock_scservo_sdk as scs
else:
import scservo_sdk as scs
@@ -688,7 +702,7 @@ class FeetechMotorsBus:
def read(self, data_name, motor_names: str | list[str] | None = None):
if self.mock:
- import tests.mock_scservo_sdk as scs
+ import tests.motors.mock_scservo_sdk as scs
else:
import scservo_sdk as scs
@@ -768,7 +782,7 @@ class FeetechMotorsBus:
def write_with_motor_ids(self, motor_models, motor_ids, data_name, values, num_retry=NUM_WRITE_RETRY):
if self.mock:
- import tests.mock_scservo_sdk as scs
+ import tests.motors.mock_scservo_sdk as scs
else:
import scservo_sdk as scs
@@ -804,7 +818,7 @@ class FeetechMotorsBus:
start_time = time.perf_counter()
if self.mock:
- import tests.mock_scservo_sdk as scs
+ import tests.motors.mock_scservo_sdk as scs
else:
import scservo_sdk as scs
diff --git a/lerobot/common/robot_devices/motors/utils.py b/lerobot/common/robot_devices/motors/utils.py
index fc64f050..bd86f4c6 100644
--- a/lerobot/common/robot_devices/motors/utils.py
+++ b/lerobot/common/robot_devices/motors/utils.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from typing import Protocol
from lerobot.common.robot_devices.motors.configs import (
diff --git a/lerobot/common/robot_devices/robots/configs.py b/lerobot/common/robot_devices/robots/configs.py
index 88cb4e6f..e940b442 100644
--- a/lerobot/common/robot_devices/robots/configs.py
+++ b/lerobot/common/robot_devices/robots/configs.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import abc
from dataclasses import dataclass, field
from typing import Sequence
diff --git a/lerobot/common/robot_devices/robots/dynamixel_calibration.py b/lerobot/common/robot_devices/robots/dynamixel_calibration.py
index 142d5794..98fe8754 100644
--- a/lerobot/common/robot_devices/robots/dynamixel_calibration.py
+++ b/lerobot/common/robot_devices/robots/dynamixel_calibration.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""Logic to calibrate a robot arm built with dynamixel motors"""
# TODO(rcadene, aliberts): move this logic into the robot code when refactoring
diff --git a/lerobot/common/robot_devices/robots/feetech_calibration.py b/lerobot/common/robot_devices/robots/feetech_calibration.py
index d779cd44..2c1e7180 100644
--- a/lerobot/common/robot_devices/robots/feetech_calibration.py
+++ b/lerobot/common/robot_devices/robots/feetech_calibration.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""Logic to calibrate a robot arm built with feetech motors"""
# TODO(rcadene, aliberts): move this logic into the robot code when refactoring
diff --git a/lerobot/common/robot_devices/robots/lekiwi_remote.py b/lerobot/common/robot_devices/robots/lekiwi_remote.py
index fd9491fa..7bf52d21 100644
--- a/lerobot/common/robot_devices/robots/lekiwi_remote.py
+++ b/lerobot/common/robot_devices/robots/lekiwi_remote.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import base64
import json
import threading
diff --git a/lerobot/common/robot_devices/robots/manipulator.py b/lerobot/common/robot_devices/robots/manipulator.py
index 62e5416e..8a7c7fe6 100644
--- a/lerobot/common/robot_devices/robots/manipulator.py
+++ b/lerobot/common/robot_devices/robots/manipulator.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""Contains logic to instantiate a robot, read information from its motors and cameras,
and send orders to its motors.
"""
diff --git a/lerobot/common/robot_devices/robots/mobile_manipulator.py b/lerobot/common/robot_devices/robots/mobile_manipulator.py
index c2cad227..385e218b 100644
--- a/lerobot/common/robot_devices/robots/mobile_manipulator.py
+++ b/lerobot/common/robot_devices/robots/mobile_manipulator.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import base64
import json
import os
diff --git a/lerobot/common/robot_devices/robots/utils.py b/lerobot/common/robot_devices/robots/utils.py
index 47e2519b..dab514d5 100644
--- a/lerobot/common/robot_devices/robots/utils.py
+++ b/lerobot/common/robot_devices/robots/utils.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from typing import Protocol
from lerobot.common.robot_devices.robots.configs import (
diff --git a/lerobot/common/robot_devices/utils.py b/lerobot/common/robot_devices/utils.py
index 19bb637e..837c9d2e 100644
--- a/lerobot/common/robot_devices/utils.py
+++ b/lerobot/common/robot_devices/utils.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import platform
import time
diff --git a/lerobot/common/utils/hub.py b/lerobot/common/utils/hub.py
index 63fcf918..df7435c0 100644
--- a/lerobot/common/utils/hub.py
+++ b/lerobot/common/utils/hub.py
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Type, TypeVar
diff --git a/lerobot/common/utils/utils.py b/lerobot/common/utils/utils.py
index cd26f04b..563a7b81 100644
--- a/lerobot/common/utils/utils.py
+++ b/lerobot/common/utils/utils.py
@@ -51,8 +51,10 @@ def auto_select_torch_device() -> torch.device:
return torch.device("cpu")
+# TODO(Steven): Remove log. log shouldn't be an argument, this should be handled by the logger level
def get_safe_torch_device(try_device: str, log: bool = False) -> torch.device:
"""Given a string, return a torch.device with checks on whether the device is available."""
+ try_device = str(try_device)
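+    # coerce torch.device inputs to str so the match below works, e.g. torch.device("cuda") -> "cuda"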
match try_device:
case "cuda":
assert torch.cuda.is_available()
@@ -85,6 +87,7 @@ def get_safe_dtype(dtype: torch.dtype, device: str | torch.device):
def is_torch_device_available(try_device: str) -> bool:
+ try_device = str(try_device) # Ensure try_device is a string
if try_device == "cuda":
return torch.cuda.is_available()
elif try_device == "mps":
@@ -92,7 +95,7 @@ def is_torch_device_available(try_device: str) -> bool:
elif try_device == "cpu":
return True
else:
- raise ValueError(f"Unknown device '{try_device}.")
+ raise ValueError(f"Unknown device {try_device}. Supported devices are: cuda, mps or cpu.")
def is_amp_available(device: str):
diff --git a/lerobot/configs/eval.py b/lerobot/configs/eval.py
index 11873352..16b35291 100644
--- a/lerobot/configs/eval.py
+++ b/lerobot/configs/eval.py
@@ -1,14 +1,26 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import datetime as dt
import logging
from dataclasses import dataclass, field
from pathlib import Path
from lerobot.common import envs, policies # noqa: F401
-from lerobot.common.utils.utils import auto_select_torch_device, is_amp_available, is_torch_device_available
from lerobot.configs import parser
from lerobot.configs.default import EvalConfig
from lerobot.configs.policies import PreTrainedConfig
-from lerobot.configs.train import TrainPipelineConfig
@dataclass
@@ -21,11 +33,6 @@ class EvalPipelineConfig:
policy: PreTrainedConfig | None = None
output_dir: Path | None = None
job_name: str | None = None
- # TODO(rcadene, aliberts): By default, use device and use_amp values from policy checkpoint.
- device: str | None = None # cuda | cpu | mps
- # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP,
- # automatic gradient scaling is used.
- use_amp: bool = False
seed: int | None = 1000
def __post_init__(self):
@@ -36,27 +43,6 @@ class EvalPipelineConfig:
self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides)
self.policy.pretrained_path = policy_path
- # When no device or use_amp are given, use the one from training config.
- if self.device is None or self.use_amp is None:
- train_cfg = TrainPipelineConfig.from_pretrained(policy_path)
- if self.device is None:
- self.device = train_cfg.device
- if self.use_amp is None:
- self.use_amp = train_cfg.use_amp
-
- # Automatically switch to available device if necessary
- if not is_torch_device_available(self.device):
- auto_device = auto_select_torch_device()
- logging.warning(f"Device '{self.device}' is not available. Switching to '{auto_device}'.")
- self.device = auto_device
-
- # Automatically deactivate AMP if necessary
- if self.use_amp and not is_amp_available(self.device):
- logging.warning(
- f"Automatic Mixed Precision (amp) is not available on device '{self.device}'. Deactivating AMP."
- )
- self.use_amp = False
-
else:
logging.warning(
"No pretrained path was provided, evaluated policy will be built from scratch (random weights)."
@@ -73,11 +59,6 @@ class EvalPipelineConfig:
eval_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}"
self.output_dir = Path("outputs/eval") / eval_dir
- if self.device is None:
- raise ValueError("Set one of the following device: cuda, cpu or mps")
- elif self.device == "cuda" and self.use_amp is None:
- raise ValueError("Set 'use_amp' to True or False.")
-
@classmethod
def __get_path_fields__(cls) -> list[str]:
"""This enables the parser to load config from the policy using `--policy.path=local/dir`"""
diff --git a/lerobot/configs/parser.py b/lerobot/configs/parser.py
index ee784877..39e31515 100644
--- a/lerobot/configs/parser.py
+++ b/lerobot/configs/parser.py
@@ -1,4 +1,19 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import importlib
import inspect
+import pkgutil
import sys
from argparse import ArgumentError
from functools import wraps
@@ -10,6 +25,7 @@ import draccus
from lerobot.common.utils.utils import has_method
PATH_KEY = "path"
+PLUGIN_DISCOVERY_SUFFIX = "discover_packages_path"
draccus.set_config_type("json")
@@ -45,6 +61,86 @@ def parse_arg(arg_name: str, args: Sequence[str] | None = None) -> str | None:
return None
+def parse_plugin_args(plugin_arg_suffix: str, args: Sequence[str]) -> dict:
+ """Parse plugin-related arguments from command-line arguments.
+
+ This function extracts arguments from command-line arguments that match a specified suffix pattern.
+ It processes arguments in the format '--key=value' and returns them as a dictionary.
+
+ Args:
+ plugin_arg_suffix (str): The suffix to identify plugin-related arguments.
+ args (Sequence[str]): A sequence of command-line arguments to parse.
+
+ Returns:
+ dict: A dictionary containing the parsed plugin arguments where:
+ - Keys are the argument names (with '--' prefix removed if present)
+ - Values are the corresponding argument values
+
+ Example:
+ >>> args = ['--env.discover_packages_path=my_package',
+ ... '--other_arg=value']
+ >>> parse_plugin_args('discover_packages_path', args)
+ {'env.discover_packages_path': 'my_package'}
+ """
+ plugin_args = {}
+ for arg in args:
+ if "=" in arg and plugin_arg_suffix in arg:
+ key, value = arg.split("=", 1)
+ # Remove leading '--' if present
+ if key.startswith("--"):
+ key = key[2:]
+ plugin_args[key] = value
+ return plugin_args
+
+
+class PluginLoadError(Exception):
+ """Raised when a plugin fails to load."""
+
+
+def load_plugin(plugin_path: str) -> None:
+ """Load and initialize a plugin from a given Python package path.
+
+ This function attempts to load a plugin by importing its package and any submodules.
+ Plugin registration is expected to happen during package initialization, i.e. when
+ the package is imported, the gym environment should be registered and the config classes
+ registered with their parents using the `register_subclass` decorator.
+
+ Args:
+ plugin_path (str): The Python package path to the plugin (e.g. "mypackage.plugins.myplugin")
+
+ Raises:
+ PluginLoadError: If the plugin cannot be loaded due to import errors or if the package path is invalid.
+
+ Examples:
+ >>> load_plugin("external_plugin.core") # Loads plugin from external package
+
+ Notes:
+ - The plugin package should handle its own registration during import
+ - All submodules in the plugin package will be imported
+ - Implementation follows the plugin discovery pattern from Python packaging guidelines
+
+ See Also:
+ https://packaging.python.org/en/latest/guides/creating-and-discovering-plugins/
+ """
+ try:
+ package_module = importlib.import_module(plugin_path, __package__)
+ except (ImportError, ModuleNotFoundError) as e:
+ raise PluginLoadError(
+ f"Failed to load plugin '{plugin_path}'. Verify the path and installation: {str(e)}"
+ ) from e
+
+ def iter_namespace(ns_pkg):
+ return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".")
+
+ try:
+ for _finder, pkg_name, _ispkg in iter_namespace(package_module):
+ importlib.import_module(pkg_name)
+ except ImportError as e:
+ raise PluginLoadError(
+ f"Failed to load plugin '{plugin_path}'. Verify the path and installation: {str(e)}"
+ ) from e
+
+
def get_path_arg(field_name: str, args: Sequence[str] | None = None) -> str | None:
return parse_arg(f"{field_name}.{PATH_KEY}", args)
@@ -92,10 +188,13 @@ def filter_path_args(fields_to_filter: str | list[str], args: Sequence[str] | No
def wrap(config_path: Path | None = None):
"""
- HACK: Similar to draccus.wrap but does two additional things:
+ HACK: Similar to draccus.wrap but does three additional things:
- Will remove '.path' arguments from CLI in order to process them later on.
- If a 'config_path' is passed and the main config class has a 'from_pretrained' method, will
initialize it from there to allow to fetch configs from the hub directly
+ - Will load plugins specified in the CLI arguments. These plugins will typically register
+ their own subclasses of config classes, so that draccus can find the right class to instantiate
+ from the CLI '.type' arguments
"""
def wrapper_outer(fn):
@@ -108,6 +207,14 @@ def wrap(config_path: Path | None = None):
args = args[1:]
else:
cli_args = sys.argv[1:]
+ plugin_args = parse_plugin_args(PLUGIN_DISCOVERY_SUFFIX, cli_args)
+ for plugin_cli_arg, plugin_path in plugin_args.items():
+ try:
+ load_plugin(plugin_path)
+ except PluginLoadError as e:
+ # add the relevant CLI arg to the error message
+ raise PluginLoadError(f"{e}\nFailed plugin CLI Arg: {plugin_cli_arg}") from e
+ cli_args = filter_arg(plugin_cli_arg, cli_args)
config_path_cli = parse_arg("config_path", cli_args)
if has_method(argtype, "__get_path_fields__"):
path_fields = argtype.__get_path_fields__()
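
To make the plugin mechanism above concrete, here is a minimal sketch of what such an external plugin module might contain, mirroring the pattern exercised in tests/configs/test_plugin_loading.py further down; the package, module and choice names are assumptions:

```python
# my_plugin_pkg/envs.py -- hypothetical external plugin module. load_plugin() imports
# the package and all of its submodules, so this registration runs as a side effect
# of that import.
from dataclasses import dataclass

from lerobot.common.envs.configs import EnvConfig


@EnvConfig.register_subclass("my_env")  # afterwards `--env.type=my_env` resolves to this class
@dataclass
class MyEnvConfig:
    task: str = "reach"
    fps: int = 30
```

A script decorated with `@parser.wrap()` could then be launched with `--env.discover_packages_path=my_plugin_pkg --env.type=my_env`: the discovery argument is loaded and filtered out by `wrap` before draccus parses the remaining CLI arguments.
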
diff --git a/lerobot/configs/policies.py b/lerobot/configs/policies.py
index 9b5a7c5c..022d1fb5 100644
--- a/lerobot/configs/policies.py
+++ b/lerobot/configs/policies.py
@@ -1,4 +1,18 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
import abc
+import logging
import os
from dataclasses import dataclass, field
from pathlib import Path
@@ -12,6 +26,7 @@ from huggingface_hub.errors import HfHubHTTPError
from lerobot.common.optim.optimizers import OptimizerConfig
from lerobot.common.optim.schedulers import LRSchedulerConfig
from lerobot.common.utils.hub import HubMixin
+from lerobot.common.utils.utils import auto_select_torch_device, is_amp_available, is_torch_device_available
from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
# Generic variable that is either PreTrainedConfig or a subclass thereof
@@ -40,8 +55,24 @@ class PreTrainedConfig(draccus.ChoiceRegistry, HubMixin, abc.ABC):
input_features: dict[str, PolicyFeature] = field(default_factory=dict)
output_features: dict[str, PolicyFeature] = field(default_factory=dict)
+ device: str | None = None # cuda | cpu | mps
+ # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP,
+ # automatic gradient scaling is used.
+ use_amp: bool = False
+
def __post_init__(self):
self.pretrained_path = None
+ if not self.device or not is_torch_device_available(self.device):
+ auto_device = auto_select_torch_device()
+ logging.warning(f"Device '{self.device}' is not available. Switching to '{auto_device}'.")
+ self.device = auto_device.type
+
+ # Automatically deactivate AMP if necessary
+ if self.use_amp and not is_amp_available(self.device):
+ logging.warning(
+ f"Automatic Mixed Precision (amp) is not available on device '{self.device}'. Deactivating AMP."
+ )
+ self.use_amp = False
@property
def type(self) -> str:
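
With `device` and `use_amp` now fields of `PreTrainedConfig`, availability checks happen when the config is built rather than in each script. A minimal sketch of the resulting behavior, using the ACT policy config as a concrete example (the actual fallback depends on the machine):

```python
from lerobot.common.policies.act.configuration_act import ACTConfig

# Requested settings; __post_init__ validates them against the available hardware.
cfg = ACTConfig(device="cuda", use_amp=True)

# On a CUDA machine this prints "cuda True". Without CUDA, a warning is logged,
# cfg.device falls back to an available device (e.g. "cpu" or "mps"), and
# cfg.use_amp is switched off if AMP is not supported on that device.
print(cfg.device, cfg.use_amp)
```
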
diff --git a/lerobot/configs/train.py b/lerobot/configs/train.py
index 464c11f9..2b147a5b 100644
--- a/lerobot/configs/train.py
+++ b/lerobot/configs/train.py
@@ -1,5 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
import datetime as dt
-import logging
import os
from dataclasses import dataclass, field
from pathlib import Path
@@ -13,7 +25,6 @@ from lerobot.common import envs
from lerobot.common.optim import OptimizerConfig
from lerobot.common.optim.schedulers import LRSchedulerConfig
from lerobot.common.utils.hub import HubMixin
-from lerobot.common.utils.utils import auto_select_torch_device, is_amp_available
from lerobot.configs import parser
from lerobot.configs.default import DatasetConfig, EvalConfig, WandBConfig
from lerobot.configs.policies import PreTrainedConfig
@@ -35,10 +46,6 @@ class TrainPipelineConfig(HubMixin):
# Note that when resuming a run, the default behavior is to use the configuration from the checkpoint,
# regardless of what's provided with the training command at the time of resumption.
resume: bool = False
- device: str | None = None # cuda | cpu | mp
- # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP,
- # automatic gradient scaling is used.
- use_amp: bool = False
# `seed` is used for training (eg: model initialization, dataset shuffling)
# AND for the evaluation environments.
seed: int | None = 1000
@@ -61,18 +68,6 @@ class TrainPipelineConfig(HubMixin):
self.checkpoint_path = None
def validate(self):
- if not self.device:
- logging.warning("No device specified, trying to infer device automatically")
- device = auto_select_torch_device()
- self.device = device.type
-
- # Automatically deactivate AMP if necessary
- if self.use_amp and not is_amp_available(self.device):
- logging.warning(
- f"Automatic Mixed Precision (amp) is not available on device '{self.device}'. Deactivating AMP."
- )
- self.use_amp = False
-
# HACK: We parse again the cli args here to get the pretrained paths if there was some.
policy_path = parser.get_path_arg("policy")
if policy_path:
diff --git a/lerobot/configs/types.py b/lerobot/configs/types.py
index 0ca45a19..6b3d92e8 100644
--- a/lerobot/configs/types.py
+++ b/lerobot/configs/types.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
# Note: We subclass str so that serialization is straightforward
# https://stackoverflow.com/questions/24481852/serialising-an-enum-member-to-json
from dataclasses import dataclass
diff --git a/lerobot/scripts/configure_motor.py b/lerobot/scripts/configure_motor.py
index f7e07070..b0dc8a97 100644
--- a/lerobot/scripts/configure_motor.py
+++ b/lerobot/scripts/configure_motor.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
"""
This script configures a single motor at a time to a given ID and baudrate.
diff --git a/lerobot/scripts/control_robot.py b/lerobot/scripts/control_robot.py
index ab5d0e8a..3c3c43f9 100644
--- a/lerobot/scripts/control_robot.py
+++ b/lerobot/scripts/control_robot.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
"""
Utilities to control a robot.
@@ -254,7 +267,7 @@ def record(
)
# Load pretrained policy
- policy = None if cfg.policy is None else make_policy(cfg.policy, cfg.device, ds_meta=dataset.meta)
+ policy = None if cfg.policy is None else make_policy(cfg.policy, ds_meta=dataset.meta)
if not robot.is_connected:
robot.connect()
@@ -285,8 +298,6 @@ def record(
episode_time_s=cfg.episode_time_s,
display_cameras=cfg.display_cameras,
policy=policy,
- device=cfg.device,
- use_amp=cfg.use_amp,
fps=cfg.fps,
single_task=cfg.single_task,
)
diff --git a/lerobot/scripts/control_sim_robot.py b/lerobot/scripts/control_sim_robot.py
index 49a88d14..5347822c 100644
--- a/lerobot/scripts/control_sim_robot.py
+++ b/lerobot/scripts/control_sim_robot.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
"""
Utilities to control a robot in simulation.
diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py
index 47225993..d7a4201f 100644
--- a/lerobot/scripts/eval.py
+++ b/lerobot/scripts/eval.py
@@ -458,7 +458,7 @@ def eval_main(cfg: EvalPipelineConfig):
logging.info(pformat(asdict(cfg)))
# Check device is available
- device = get_safe_torch_device(cfg.device, log=True)
+ device = get_safe_torch_device(cfg.policy.device, log=True)
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
@@ -470,14 +470,14 @@ def eval_main(cfg: EvalPipelineConfig):
env = make_env(cfg.env, n_envs=cfg.eval.batch_size, use_async_envs=cfg.eval.use_async_envs)
logging.info("Making policy.")
+
policy = make_policy(
cfg=cfg.policy,
- device=device,
env_cfg=cfg.env,
)
policy.eval()
- with torch.no_grad(), torch.autocast(device_type=device.type) if cfg.use_amp else nullcontext():
+ with torch.no_grad(), torch.autocast(device_type=device.type) if cfg.policy.use_amp else nullcontext():
info = eval_policy(
env,
policy,
diff --git a/lerobot/scripts/find_motors_bus_port.py b/lerobot/scripts/find_motors_bus_port.py
index 67b92ad7..68f2315d 100644
--- a/lerobot/scripts/find_motors_bus_port.py
+++ b/lerobot/scripts/find_motors_bus_port.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
import os
import time
from pathlib import Path
diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py
index e36c697a..f2b1e29e 100644
--- a/lerobot/scripts/train.py
+++ b/lerobot/scripts/train.py
@@ -120,7 +120,7 @@ def train(cfg: TrainPipelineConfig):
set_seed(cfg.seed)
# Check device is available
- device = get_safe_torch_device(cfg.device, log=True)
+ device = get_safe_torch_device(cfg.policy.device, log=True)
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
@@ -138,13 +138,12 @@ def train(cfg: TrainPipelineConfig):
logging.info("Creating policy")
policy = make_policy(
cfg=cfg.policy,
- device=device,
ds_meta=dataset.meta,
)
logging.info("Creating optimizer and scheduler")
optimizer, lr_scheduler = make_optimizer_and_scheduler(cfg, policy)
- grad_scaler = GradScaler(device, enabled=cfg.use_amp)
+ grad_scaler = GradScaler(device.type, enabled=cfg.policy.use_amp)
step = 0 # number of policy updates (forward + backward + optim)
@@ -218,7 +217,7 @@ def train(cfg: TrainPipelineConfig):
cfg.optimizer.grad_clip_norm,
grad_scaler=grad_scaler,
lr_scheduler=lr_scheduler,
- use_amp=cfg.use_amp,
+ use_amp=cfg.policy.use_amp,
)
# Note: eval and checkpoint happens *after* the `step`th training update has completed, so we
@@ -249,7 +248,10 @@ def train(cfg: TrainPipelineConfig):
if cfg.env and is_eval_step:
step_id = get_step_identifier(step, cfg.steps)
logging.info(f"Eval policy at step {step}")
- with torch.no_grad(), torch.autocast(device_type=device.type) if cfg.use_amp else nullcontext():
+ with (
+ torch.no_grad(),
+ torch.autocast(device_type=device.type) if cfg.policy.use_amp else nullcontext(),
+ ):
eval_info = eval_policy(
eval_env,
policy,
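
The training and eval scripts now read both settings from `cfg.policy`. A standalone sketch of the AMP-guarded pattern used above, in plain PyTorch with placeholder model and data:

```python
from contextlib import nullcontext

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
use_amp = device.type == "cuda"  # stand-in for cfg.policy.use_amp

model = torch.nn.Linear(8, 2).to(device)
batch = torch.randn(4, 8, device=device)

# Mirrors `GradScaler(device.type, enabled=cfg.policy.use_amp)` from the hunk above.
scaler = torch.amp.GradScaler(device.type, enabled=use_amp)

# Mirrors the eval-time context: no gradients, autocast only when AMP is enabled.
with torch.no_grad(), torch.autocast(device_type=device.type) if use_amp else nullcontext():
    out = model(batch)
print(out.shape, scaler.is_enabled())
```
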
diff --git a/lerobot/scripts/visualize_dataset.py b/lerobot/scripts/visualize_dataset.py
index 11feb1af..cdfea6b8 100644
--- a/lerobot/scripts/visualize_dataset.py
+++ b/lerobot/scripts/visualize_dataset.py
@@ -265,13 +265,25 @@ def main():
),
)
+ parser.add_argument(
+ "--tolerance-s",
+ type=float,
+ default=1e-4,
+ help=(
+ "Tolerance in seconds used to ensure data timestamps respect the dataset fps value"
+ "This is argument passed to the constructor of LeRobotDataset and maps to its tolerance_s constructor argument"
+ "If not given, defaults to 1e-4."
+ ),
+ )
+
args = parser.parse_args()
kwargs = vars(args)
repo_id = kwargs.pop("repo_id")
root = kwargs.pop("root")
+ tolerance_s = kwargs.pop("tolerance_s")
logging.info("Loading dataset")
- dataset = LeRobotDataset(repo_id, root=root)
+ dataset = LeRobotDataset(repo_id, root=root, tolerance_s=tolerance_s)
visualize_dataset(dataset, **vars(args))
diff --git a/lerobot/scripts/visualize_dataset_html.py b/lerobot/scripts/visualize_dataset_html.py
index d5825aa6..0fc21a8f 100644
--- a/lerobot/scripts/visualize_dataset_html.py
+++ b/lerobot/scripts/visualize_dataset_html.py
@@ -234,7 +234,7 @@ def get_episode_data(dataset: LeRobotDataset | IterableNamespace, episode_index)
This file will be loaded by Dygraph javascript to plot data in real time."""
columns = []
- selected_columns = [col for col, ft in dataset.features.items() if ft["dtype"] == "float32"]
+ selected_columns = [col for col, ft in dataset.features.items() if ft["dtype"] in ["float32", "int32"]]
selected_columns.remove("timestamp")
ignored_columns = []
@@ -446,15 +446,31 @@ def main():
help="Delete the output directory if it exists already.",
)
+ parser.add_argument(
+ "--tolerance-s",
+ type=float,
+ default=1e-4,
+ help=(
+ "Tolerance in seconds used to ensure data timestamps respect the dataset fps value"
+ "This is argument passed to the constructor of LeRobotDataset and maps to its tolerance_s constructor argument"
+ "If not given, defaults to 1e-4."
+ ),
+ )
+
args = parser.parse_args()
kwargs = vars(args)
repo_id = kwargs.pop("repo_id")
load_from_hf_hub = kwargs.pop("load_from_hf_hub")
root = kwargs.pop("root")
+ tolerance_s = kwargs.pop("tolerance_s")
dataset = None
if repo_id:
- dataset = LeRobotDataset(repo_id, root=root) if not load_from_hf_hub else get_dataset_info(repo_id)
+ dataset = (
+ LeRobotDataset(repo_id, root=root, tolerance_s=tolerance_s)
+ if not load_from_hf_hub
+ else get_dataset_info(repo_id)
+ )
visualize_dataset_html(dataset, **vars(args))
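
Both visualization scripts forward the new flag straight to the dataset constructor. The programmatic equivalent, with an illustrative repo id:

```python
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

# tolerance_s bounds how far each timestamp may deviate from the ideal 1/fps grid;
# 1e-4 matches the default of the new --tolerance-s flag.
dataset = LeRobotDataset("lerobot/pusht", tolerance_s=1e-4)
print(dataset.fps, len(dataset))
```
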
diff --git a/pyproject.toml b/pyproject.toml
index 4212ef57..10305fee 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
[project.urls]
homepage = "https://github.com/huggingface/lerobot"
issues = "https://github.com/huggingface/lerobot/issues"
@@ -8,18 +22,19 @@ name = "lerobot"
version = "0.1.0"
description = "🤗 LeRobot: State-of-the-art Machine Learning for Real-World Robotics in Pytorch"
authors = [
- {name = "Rémi Cadène", email = "re.cadene@gmail.com"},
- {name = "Simon Alibert", email = "alibert.sim@gmail.com"},
- {name = "Alexander Soare", email = "alexander.soare159@gmail.com"},
- {name = "Quentin Gallouédec", email = "quentin.gallouedec@ec-lyon.fr"},
- {name = "Adil Zouitine", email = "adilzouitinegm@gmail.com"},
- {name = "Thomas Wolf", email = "thomaswolfcontact@gmail.com"},
+ { name = "Rémi Cadène", email = "re.cadene@gmail.com" },
+ { name = "Simon Alibert", email = "alibert.sim@gmail.com" },
+ { name = "Alexander Soare", email = "alexander.soare159@gmail.com" },
+ { name = "Quentin Gallouédec", email = "quentin.gallouedec@ec-lyon.fr" },
+ { name = "Adil Zouitine", email = "adilzouitinegm@gmail.com" },
+ { name = "Thomas Wolf", email = "thomaswolfcontact@gmail.com" },
+ { name = "Steven Palma", email = "imstevenpmwork@ieee.org" },
]
readme = "README.md"
-license = {text = "Apache-2.0"}
+license = { text = "Apache-2.0" }
requires-python = ">=3.10"
keywords = ["robotics", "deep learning", "pytorch"]
-classifiers=[
+classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Education",
@@ -38,10 +53,9 @@ dependencies = [
"einops>=0.8.0",
"flask>=3.0.3",
"gdown>=5.1.0",
- "gymnasium==0.29.1", # TODO(rcadene, aliberts): Make gym 1.0.0 work
+ "gymnasium==0.29.1", # TODO(rcadene, aliberts): Make gym 1.0.0 work
"h5py>=3.10.0",
"huggingface-hub[hf-transfer,cli]>=0.27.1 ; python_version < '4.0'",
- "hydra-core>=1.3.2",
"imageio[ffmpeg]>=2.34.0",
"jsonlines>=4.0.0",
"numba>=0.59.0",
@@ -55,6 +69,7 @@ dependencies = [
"rerun-sdk>=0.21.0",
"termcolor>=2.4.0",
"torch>=2.2.1",
+ "torchcodec>=0.2.1",
"torchvision>=0.21.0",
"wandb>=0.16.3",
"zarr>=2.17.0",
@@ -63,7 +78,9 @@ dependencies = [
[project.optional-dependencies]
aloha = ["gym-aloha>=0.1.1 ; python_version < '4.0'"]
dev = ["pre-commit>=3.7.0", "debugpy>=1.8.1"]
-dora = ["gym-dora @ git+https://github.com/dora-rs/dora-lerobot.git#subdirectory=gym_dora ; python_version < '4.0'"]
+dora = [
+ "gym-dora @ git+https://github.com/dora-rs/dora-lerobot.git#subdirectory=gym_dora ; python_version < '4.0'",
+]
dynamixel = ["dynamixel-sdk>=3.7.31", "pynput>=1.7.7"]
feetech = ["feetech-servo-sdk>=1.0.0", "pynput>=1.7.7"]
intelrealsense = ["pyrealsense2>=2.55.1.6486 ; sys_platform != 'darwin'"]
@@ -74,7 +91,7 @@ stretch = [
"hello-robot-stretch-body>=0.7.27 ; python_version < '4.0' and sys_platform == 'linux'",
"pyrender @ git+https://github.com/mmatl/pyrender.git ; sys_platform == 'linux'",
"pyrealsense2>=2.55.1.6486 ; sys_platform != 'darwin'",
- "pynput>=1.7.7"
+ "pynput>=1.7.7",
]
test = ["pytest>=8.1.0", "pytest-cov>=5.0.0", "pyserial>=3.5"]
umi = ["imagecodecs>=2024.1.1"]
@@ -87,30 +104,7 @@ requires-poetry = ">=2.1"
[tool.ruff]
line-length = 110
target-version = "py310"
-exclude = [
- "tests/data",
- ".bzr",
- ".direnv",
- ".eggs",
- ".git",
- ".git-rewrite",
- ".hg",
- ".mypy_cache",
- ".nox",
- ".pants.d",
- ".pytype",
- ".ruff_cache",
- ".svn",
- ".tox",
- ".venv",
- "__pypackages__",
- "_build",
- "buck-out",
- "build",
- "dist",
- "node_modules",
- "venv",
-]
+exclude = ["tests/artifacts/**/*.safetensors"]
[tool.ruff.lint]
select = ["E4", "E7", "E9", "F", "I", "N", "B", "C4", "SIM"]
@@ -128,8 +122,8 @@ skips = ["B101", "B311", "B404", "B603"]
[tool.typos]
default.extend-ignore-re = [
- "(?Rm)^.*(#|//)\\s*spellchecker:disable-line$", # spellchecker:disable-line
- "(?s)(#|//)\\s*spellchecker:off.*?\\n\\s*(#|//)\\s*spellchecker:on" # spellchecker:
+ "(?Rm)^.*(#|//)\\s*spellchecker:disable-line$", # spellchecker:disable-line
+ "(?s)(#|//)\\s*spellchecker:off.*?\\n\\s*(#|//)\\s*spellchecker:on", # spellchecker:
]
default.extend-ignore-identifiers-re = [
# Add individual words here to ignore them
diff --git a/tests/__init__.py b/tests/__init__.py
index e69de29b..f52df1bd 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/aloha_sim_insertion_human/frame_0.safetensors b/tests/artifacts/datasets/lerobot/aloha_sim_insertion_human/frame_0.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/aloha_sim_insertion_human/frame_0.safetensors
rename to tests/artifacts/datasets/lerobot/aloha_sim_insertion_human/frame_0.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/aloha_sim_insertion_human/frame_1.safetensors b/tests/artifacts/datasets/lerobot/aloha_sim_insertion_human/frame_1.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/aloha_sim_insertion_human/frame_1.safetensors
rename to tests/artifacts/datasets/lerobot/aloha_sim_insertion_human/frame_1.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/aloha_sim_insertion_human/frame_250.safetensors b/tests/artifacts/datasets/lerobot/aloha_sim_insertion_human/frame_250.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/aloha_sim_insertion_human/frame_250.safetensors
rename to tests/artifacts/datasets/lerobot/aloha_sim_insertion_human/frame_250.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/aloha_sim_insertion_human/frame_251.safetensors b/tests/artifacts/datasets/lerobot/aloha_sim_insertion_human/frame_251.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/aloha_sim_insertion_human/frame_251.safetensors
rename to tests/artifacts/datasets/lerobot/aloha_sim_insertion_human/frame_251.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/aloha_sim_insertion_human/frame_498.safetensors b/tests/artifacts/datasets/lerobot/aloha_sim_insertion_human/frame_498.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/aloha_sim_insertion_human/frame_498.safetensors
rename to tests/artifacts/datasets/lerobot/aloha_sim_insertion_human/frame_498.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/aloha_sim_insertion_human/frame_499.safetensors b/tests/artifacts/datasets/lerobot/aloha_sim_insertion_human/frame_499.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/aloha_sim_insertion_human/frame_499.safetensors
rename to tests/artifacts/datasets/lerobot/aloha_sim_insertion_human/frame_499.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_0.safetensors b/tests/artifacts/datasets/lerobot/pusht/frame_0.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_0.safetensors
rename to tests/artifacts/datasets/lerobot/pusht/frame_0.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_1.safetensors b/tests/artifacts/datasets/lerobot/pusht/frame_1.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_1.safetensors
rename to tests/artifacts/datasets/lerobot/pusht/frame_1.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_159.safetensors b/tests/artifacts/datasets/lerobot/pusht/frame_159.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_159.safetensors
rename to tests/artifacts/datasets/lerobot/pusht/frame_159.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_160.safetensors b/tests/artifacts/datasets/lerobot/pusht/frame_160.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_160.safetensors
rename to tests/artifacts/datasets/lerobot/pusht/frame_160.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_80.safetensors b/tests/artifacts/datasets/lerobot/pusht/frame_80.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_80.safetensors
rename to tests/artifacts/datasets/lerobot/pusht/frame_80.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_81.safetensors b/tests/artifacts/datasets/lerobot/pusht/frame_81.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/pusht/frame_81.safetensors
rename to tests/artifacts/datasets/lerobot/pusht/frame_81.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_0.safetensors b/tests/artifacts/datasets/lerobot/xarm_lift_medium/frame_0.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_0.safetensors
rename to tests/artifacts/datasets/lerobot/xarm_lift_medium/frame_0.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_1.safetensors b/tests/artifacts/datasets/lerobot/xarm_lift_medium/frame_1.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_1.safetensors
rename to tests/artifacts/datasets/lerobot/xarm_lift_medium/frame_1.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_12.safetensors b/tests/artifacts/datasets/lerobot/xarm_lift_medium/frame_12.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_12.safetensors
rename to tests/artifacts/datasets/lerobot/xarm_lift_medium/frame_12.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_13.safetensors b/tests/artifacts/datasets/lerobot/xarm_lift_medium/frame_13.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_13.safetensors
rename to tests/artifacts/datasets/lerobot/xarm_lift_medium/frame_13.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_23.safetensors b/tests/artifacts/datasets/lerobot/xarm_lift_medium/frame_23.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_23.safetensors
rename to tests/artifacts/datasets/lerobot/xarm_lift_medium/frame_23.safetensors
diff --git a/tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_24.safetensors b/tests/artifacts/datasets/lerobot/xarm_lift_medium/frame_24.safetensors
similarity index 100%
rename from tests/data/save_dataset_to_safetensors/lerobot/xarm_lift_medium/frame_24.safetensors
rename to tests/artifacts/datasets/lerobot/xarm_lift_medium/frame_24.safetensors
diff --git a/tests/scripts/save_dataset_to_safetensors.py b/tests/artifacts/datasets/save_dataset_to_safetensors.py
similarity index 95%
rename from tests/scripts/save_dataset_to_safetensors.py
rename to tests/artifacts/datasets/save_dataset_to_safetensors.py
index 3b77348c..74d42a3d 100644
--- a/tests/scripts/save_dataset_to_safetensors.py
+++ b/tests/artifacts/datasets/save_dataset_to_safetensors.py
@@ -23,7 +23,7 @@ If you know that your change will break backward compatibility, you should write
doesn't need to be merged into the `main` branch. Then you need to run this script and update the test artifacts.
Example usage:
- `python tests/scripts/save_dataset_to_safetensors.py`
+ `python tests/artifacts/datasets/save_dataset_to_safetensors.py`
"""
import shutil
@@ -88,4 +88,4 @@ if __name__ == "__main__":
"lerobot/nyu_franka_play_dataset",
"lerobot/cmu_stretch",
]:
- save_dataset_to_safetensors("tests/data/save_dataset_to_safetensors", repo_id=dataset)
+ save_dataset_to_safetensors("tests/artifacts/datasets", repo_id=dataset)
diff --git a/tests/data/save_image_transforms_to_safetensors/default_transforms.safetensors b/tests/artifacts/image_transforms/default_transforms.safetensors
similarity index 100%
rename from tests/data/save_image_transforms_to_safetensors/default_transforms.safetensors
rename to tests/artifacts/image_transforms/default_transforms.safetensors
diff --git a/tests/scripts/save_image_transforms_to_safetensors.py b/tests/artifacts/image_transforms/save_image_transforms_to_safetensors.py
similarity index 97%
rename from tests/scripts/save_image_transforms_to_safetensors.py
rename to tests/artifacts/image_transforms/save_image_transforms_to_safetensors.py
index bd2c3add..7b037af4 100644
--- a/tests/scripts/save_image_transforms_to_safetensors.py
+++ b/tests/artifacts/image_transforms/save_image_transforms_to_safetensors.py
@@ -27,7 +27,7 @@ from lerobot.common.datasets.transforms import (
)
from lerobot.common.utils.random_utils import seeded_context
-ARTIFACT_DIR = Path("tests/data/save_image_transforms_to_safetensors")
+ARTIFACT_DIR = Path("tests/artifacts/image_transforms")
DATASET_REPO_ID = "lerobot/aloha_mobile_shrimp"
diff --git a/tests/data/save_image_transforms_to_safetensors/single_transforms.safetensors b/tests/artifacts/image_transforms/single_transforms.safetensors
similarity index 100%
rename from tests/data/save_image_transforms_to_safetensors/single_transforms.safetensors
rename to tests/artifacts/image_transforms/single_transforms.safetensors
diff --git a/tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_/actions.safetensors b/tests/artifacts/policies/aloha_sim_insertion_human_act_/actions.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_/actions.safetensors
rename to tests/artifacts/policies/aloha_sim_insertion_human_act_/actions.safetensors
diff --git a/tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_/grad_stats.safetensors b/tests/artifacts/policies/aloha_sim_insertion_human_act_/grad_stats.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_/grad_stats.safetensors
rename to tests/artifacts/policies/aloha_sim_insertion_human_act_/grad_stats.safetensors
diff --git a/tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_/output_dict.safetensors b/tests/artifacts/policies/aloha_sim_insertion_human_act_/output_dict.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_/output_dict.safetensors
rename to tests/artifacts/policies/aloha_sim_insertion_human_act_/output_dict.safetensors
diff --git a/tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_/param_stats.safetensors b/tests/artifacts/policies/aloha_sim_insertion_human_act_/param_stats.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_/param_stats.safetensors
rename to tests/artifacts/policies/aloha_sim_insertion_human_act_/param_stats.safetensors
diff --git a/tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_1000_steps/actions.safetensors b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/actions.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_1000_steps/actions.safetensors
rename to tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/actions.safetensors
diff --git a/tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_1000_steps/grad_stats.safetensors b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/grad_stats.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_1000_steps/grad_stats.safetensors
rename to tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/grad_stats.safetensors
diff --git a/tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_1000_steps/output_dict.safetensors b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/output_dict.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_1000_steps/output_dict.safetensors
rename to tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/output_dict.safetensors
diff --git a/tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_1000_steps/param_stats.safetensors b/tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/param_stats.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/aloha_sim_insertion_human_act_1000_steps/param_stats.safetensors
rename to tests/artifacts/policies/aloha_sim_insertion_human_act_1000_steps/param_stats.safetensors
diff --git a/tests/data/save_policy_to_safetensors/pusht_diffusion_/actions.safetensors b/tests/artifacts/policies/pusht_diffusion_/actions.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/pusht_diffusion_/actions.safetensors
rename to tests/artifacts/policies/pusht_diffusion_/actions.safetensors
diff --git a/tests/data/save_policy_to_safetensors/pusht_diffusion_/grad_stats.safetensors b/tests/artifacts/policies/pusht_diffusion_/grad_stats.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/pusht_diffusion_/grad_stats.safetensors
rename to tests/artifacts/policies/pusht_diffusion_/grad_stats.safetensors
diff --git a/tests/data/save_policy_to_safetensors/pusht_diffusion_/output_dict.safetensors b/tests/artifacts/policies/pusht_diffusion_/output_dict.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/pusht_diffusion_/output_dict.safetensors
rename to tests/artifacts/policies/pusht_diffusion_/output_dict.safetensors
diff --git a/tests/data/save_policy_to_safetensors/pusht_diffusion_/param_stats.safetensors b/tests/artifacts/policies/pusht_diffusion_/param_stats.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/pusht_diffusion_/param_stats.safetensors
rename to tests/artifacts/policies/pusht_diffusion_/param_stats.safetensors
diff --git a/tests/scripts/save_policy_to_safetensors.py b/tests/artifacts/policies/save_policy_to_safetensors.py
similarity index 96%
rename from tests/scripts/save_policy_to_safetensors.py
rename to tests/artifacts/policies/save_policy_to_safetensors.py
index 03726163..106f0dc0 100644
--- a/tests/scripts/save_policy_to_safetensors.py
+++ b/tests/artifacts/policies/save_policy_to_safetensors.py
@@ -33,12 +33,11 @@ def get_policy_stats(ds_repo_id: str, policy_name: str, policy_kwargs: dict):
# TODO(rcadene, aliberts): remove dataset download
dataset=DatasetConfig(repo_id=ds_repo_id, episodes=[0]),
policy=make_policy_config(policy_name, **policy_kwargs),
- device="cpu",
)
train_cfg.validate() # Needed for auto-setting some parameters
dataset = make_dataset(train_cfg)
- policy = make_policy(train_cfg.policy, ds_meta=dataset.meta, device=train_cfg.device)
+ policy = make_policy(train_cfg.policy, ds_meta=dataset.meta)
policy.train()
optimizer, _ = make_optimizer_and_scheduler(train_cfg, policy)
@@ -142,5 +141,5 @@ if __name__ == "__main__":
raise RuntimeError("No policies were provided!")
for ds_repo_id, policy, policy_kwargs, file_name_extra in artifacts_cfg:
ds_name = ds_repo_id.split("/")[-1]
- output_dir = Path("tests/data/save_policy_to_safetensors") / f"{ds_name}_{policy}_{file_name_extra}"
+ output_dir = Path("tests/artifacts/policies") / f"{ds_name}_{policy}_{file_name_extra}"
save_policy_to_safetensors(output_dir, ds_repo_id, policy, policy_kwargs)
diff --git a/tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_mpc/actions.safetensors b/tests/artifacts/policies/xarm_lift_medium_tdmpc_use_mpc/actions.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_mpc/actions.safetensors
rename to tests/artifacts/policies/xarm_lift_medium_tdmpc_use_mpc/actions.safetensors
diff --git a/tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_mpc/grad_stats.safetensors b/tests/artifacts/policies/xarm_lift_medium_tdmpc_use_mpc/grad_stats.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_mpc/grad_stats.safetensors
rename to tests/artifacts/policies/xarm_lift_medium_tdmpc_use_mpc/grad_stats.safetensors
diff --git a/tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_mpc/output_dict.safetensors b/tests/artifacts/policies/xarm_lift_medium_tdmpc_use_mpc/output_dict.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_mpc/output_dict.safetensors
rename to tests/artifacts/policies/xarm_lift_medium_tdmpc_use_mpc/output_dict.safetensors
diff --git a/tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_mpc/param_stats.safetensors b/tests/artifacts/policies/xarm_lift_medium_tdmpc_use_mpc/param_stats.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_mpc/param_stats.safetensors
rename to tests/artifacts/policies/xarm_lift_medium_tdmpc_use_mpc/param_stats.safetensors
diff --git a/tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_policy/actions.safetensors b/tests/artifacts/policies/xarm_lift_medium_tdmpc_use_policy/actions.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_policy/actions.safetensors
rename to tests/artifacts/policies/xarm_lift_medium_tdmpc_use_policy/actions.safetensors
diff --git a/tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_policy/grad_stats.safetensors b/tests/artifacts/policies/xarm_lift_medium_tdmpc_use_policy/grad_stats.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_policy/grad_stats.safetensors
rename to tests/artifacts/policies/xarm_lift_medium_tdmpc_use_policy/grad_stats.safetensors
diff --git a/tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_policy/output_dict.safetensors b/tests/artifacts/policies/xarm_lift_medium_tdmpc_use_policy/output_dict.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_policy/output_dict.safetensors
rename to tests/artifacts/policies/xarm_lift_medium_tdmpc_use_policy/output_dict.safetensors
diff --git a/tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_policy/param_stats.safetensors b/tests/artifacts/policies/xarm_lift_medium_tdmpc_use_policy/param_stats.safetensors
similarity index 100%
rename from tests/data/save_policy_to_safetensors/xarm_lift_medium_tdmpc_use_policy/param_stats.safetensors
rename to tests/artifacts/policies/xarm_lift_medium_tdmpc_use_policy/param_stats.safetensors
diff --git a/tests/mock_cv2.py b/tests/cameras/mock_cv2.py
similarity index 77%
rename from tests/mock_cv2.py
rename to tests/cameras/mock_cv2.py
index 806e35ed..eeaf859c 100644
--- a/tests/mock_cv2.py
+++ b/tests/cameras/mock_cv2.py
@@ -1,7 +1,25 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from functools import cache
import numpy as np
+CAP_V4L2 = 200
+CAP_DSHOW = 700
+CAP_AVFOUNDATION = 1200
+CAP_ANY = -1
+
CAP_PROP_FPS = 5
CAP_PROP_FRAME_WIDTH = 3
CAP_PROP_FRAME_HEIGHT = 4
diff --git a/tests/mock_pyrealsense2.py b/tests/cameras/mock_pyrealsense2.py
similarity index 83%
rename from tests/mock_pyrealsense2.py
rename to tests/cameras/mock_pyrealsense2.py
index 5a39fc2b..c477eb06 100644
--- a/tests/mock_pyrealsense2.py
+++ b/tests/cameras/mock_pyrealsense2.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
import enum
import numpy as np
diff --git a/tests/test_cameras.py b/tests/cameras/test_cameras.py
similarity index 74%
rename from tests/test_cameras.py
rename to tests/cameras/test_cameras.py
index cfefc215..868358ec 100644
--- a/tests/test_cameras.py
+++ b/tests/cameras/test_cameras.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
"""
Tests for physical cameras and their mocked versions.
If the physical camera is not connected to the computer, or not working,
@@ -72,8 +85,8 @@ def test_camera(request, camera_type, mock):
camera.connect()
assert camera.is_connected
assert camera.fps is not None
- assert camera.width is not None
- assert camera.height is not None
+ assert camera.capture_width is not None
+ assert camera.capture_height is not None
# Test connecting twice raises an error
with pytest.raises(RobotDeviceAlreadyConnectedError):
@@ -133,7 +146,7 @@ def test_camera(request, camera_type, mock):
camera.connect()
if mock:
- import tests.mock_cv2 as cv2
+ import tests.cameras.mock_cv2 as cv2
else:
import cv2
@@ -191,3 +204,49 @@ def test_save_images_from_cameras(tmp_path, request, camera_type, mock):
# Small `record_time_s` to speedup unit tests
save_images_from_cameras(tmp_path, record_time_s=0.02, mock=mock)
+
+
+@pytest.mark.parametrize("camera_type, mock", TEST_CAMERA_TYPES)
+@require_camera
+def test_camera_rotation(request, camera_type, mock):
+ config_kwargs = {"camera_type": camera_type, "mock": mock, "width": 640, "height": 480, "fps": 30}
+
+ # No rotation.
+ camera = make_camera(**config_kwargs, rotation=None)
+ camera.connect()
+ assert camera.capture_width == 640
+ assert camera.capture_height == 480
+ assert camera.width == 640
+ assert camera.height == 480
+ no_rot_img = camera.read()
+ h, w, c = no_rot_img.shape
+ assert h == 480 and w == 640 and c == 3
+ camera.disconnect()
+
+ # Rotation = 90 (clockwise).
+ camera = make_camera(**config_kwargs, rotation=90)
+ camera.connect()
+ # With a 90° rotation, we expect the metadata dimensions to be swapped.
+ assert camera.capture_width == 640
+ assert camera.capture_height == 480
+ assert camera.width == 480
+ assert camera.height == 640
+ import cv2
+
+ assert camera.rotation == cv2.ROTATE_90_CLOCKWISE
+ rot_img = camera.read()
+ h, w, c = rot_img.shape
+ assert h == 640 and w == 480 and c == 3
+ camera.disconnect()
+
+ # Rotation = 180.
+ camera = make_camera(**config_kwargs, rotation=180)
+ camera.connect()
+ assert camera.capture_width == 640
+ assert camera.capture_height == 480
+ assert camera.width == 640
+ assert camera.height == 480
+ rot_img = camera.read()
+ h, w, c = rot_img.shape
+ assert h == 480 and w == 640 and c == 3
+ camera.disconnect()
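
The width/height expectations in the new rotation test follow directly from how OpenCV rotates frames; a small standalone sketch (requires opencv-python and numpy):

```python
import cv2
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)          # (height, width, channels)
print(cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE).shape)  # (640, 480, 3): dimensions swap
print(cv2.rotate(frame, cv2.ROTATE_180).shape)           # (480, 640, 3): dimensions unchanged
```
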
diff --git a/tests/configs/test_plugin_loading.py b/tests/configs/test_plugin_loading.py
new file mode 100644
index 00000000..1a8cceed
--- /dev/null
+++ b/tests/configs/test_plugin_loading.py
@@ -0,0 +1,89 @@
+import sys
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Generator
+
+import pytest
+
+from lerobot.common.envs.configs import EnvConfig
+from lerobot.configs.parser import PluginLoadError, load_plugin, parse_plugin_args, wrap
+
+
+def create_plugin_code(*, base_class: str = "EnvConfig", plugin_name: str = "test_env") -> str:
+ """Creates a dummy plugin module that implements its own EnvConfig subclass."""
+ return f"""
+from dataclasses import dataclass
+from lerobot.common.envs.configs import {base_class}
+
+@{base_class}.register_subclass("{plugin_name}")
+@dataclass
+class TestPluginConfig:
+ value: int = 42
+ """
+
+
+@pytest.fixture
+def plugin_dir(tmp_path: Path) -> Generator[Path, None, None]:
+ """Creates a temporary plugin package structure."""
+ plugin_pkg = tmp_path / "test_plugin"
+ plugin_pkg.mkdir()
+ (plugin_pkg / "__init__.py").touch()
+
+ with open(plugin_pkg / "my_plugin.py", "w") as f:
+ f.write(create_plugin_code())
+
+ # Add tmp_path to Python path so we can import from it
+ sys.path.insert(0, str(tmp_path))
+ yield plugin_pkg
+ sys.path.pop(0)
+
+
+def test_parse_plugin_args():
+ cli_args = [
+ "--env.type=test",
+ "--model.discover_packages_path=some.package",
+ "--env.discover_packages_path=other.package",
+ ]
+ plugin_args = parse_plugin_args("discover_packages_path", cli_args)
+ assert plugin_args == {
+ "model.discover_packages_path": "some.package",
+ "env.discover_packages_path": "other.package",
+ }
+
+
+def test_load_plugin_success(plugin_dir: Path):
+ # Import should work and register the plugin with the real EnvConfig
+ load_plugin("test_plugin")
+
+ assert "test_env" in EnvConfig.get_known_choices()
+ plugin_cls = EnvConfig.get_choice_class("test_env")
+ plugin_instance = plugin_cls()
+ assert plugin_instance.value == 42
+
+
+def test_load_plugin_failure():
+ with pytest.raises(PluginLoadError) as exc_info:
+ load_plugin("nonexistent_plugin")
+ assert "Failed to load plugin 'nonexistent_plugin'" in str(exc_info.value)
+
+
+def test_wrap_with_plugin(plugin_dir: Path):
+ @dataclass
+ class Config:
+ env: EnvConfig
+
+ @wrap()
+ def dummy_func(cfg: Config):
+ return cfg
+
+ # Test loading plugin via CLI args
+ sys.argv = [
+ "dummy_script.py",
+ "--env.discover_packages_path=test_plugin",
+ "--env.type=test_env",
+ ]
+
+ cfg = dummy_func()
+ assert isinstance(cfg, Config)
+ assert isinstance(cfg.env, EnvConfig.get_choice_class("test_env"))
+ assert cfg.env.value == 42
diff --git a/tests/test_compute_stats.py b/tests/datasets/test_compute_stats.py
similarity index 100%
rename from tests/test_compute_stats.py
rename to tests/datasets/test_compute_stats.py
diff --git a/tests/test_datasets.py b/tests/datasets/test_datasets.py
similarity index 99%
rename from tests/test_datasets.py
rename to tests/datasets/test_datasets.py
index 003a60c9..81447089 100644
--- a/tests/test_datasets.py
+++ b/tests/datasets/test_datasets.py
@@ -45,7 +45,7 @@ from lerobot.common.robot_devices.robots.utils import make_robot
from lerobot.configs.default import DatasetConfig
from lerobot.configs.train import TrainPipelineConfig
from tests.fixtures.constants import DUMMY_CHW, DUMMY_HWC, DUMMY_REPO_ID
-from tests.utils import DEVICE, require_x86_64_kernel
+from tests.utils import require_x86_64_kernel
@pytest.fixture
@@ -349,7 +349,6 @@ def test_factory(env_name, repo_id, policy_name):
dataset=DatasetConfig(repo_id=repo_id, episodes=[0]),
env=make_env_config(env_name),
policy=make_policy_config(policy_name),
- device=DEVICE,
)
dataset = make_dataset(cfg)
@@ -474,12 +473,12 @@ def test_flatten_unflatten_dict():
)
@require_x86_64_kernel
def test_backward_compatibility(repo_id):
- """The artifacts for this test have been generated by `tests/scripts/save_dataset_to_safetensors.py`."""
+ """The artifacts for this test have been generated by `tests/artifacts/datasets/save_dataset_to_safetensors.py`."""
# TODO(rcadene, aliberts): remove dataset download
dataset = LeRobotDataset(repo_id, episodes=[0])
- test_dir = Path("tests/data/save_dataset_to_safetensors") / repo_id
+ test_dir = Path("tests/artifacts/datasets") / repo_id
def load_and_compare(i):
new_frame = dataset[i] # noqa: B023
diff --git a/tests/test_delta_timestamps.py b/tests/datasets/test_delta_timestamps.py
similarity index 93%
rename from tests/test_delta_timestamps.py
rename to tests/datasets/test_delta_timestamps.py
index b27cc1eb..35014642 100644
--- a/tests/test_delta_timestamps.py
+++ b/tests/datasets/test_delta_timestamps.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from itertools import accumulate
import datasets
diff --git a/tests/test_image_transforms.py b/tests/datasets/test_image_transforms.py
similarity index 99%
rename from tests/test_image_transforms.py
rename to tests/datasets/test_image_transforms.py
index 19bd77df..352aba99 100644
--- a/tests/test_image_transforms.py
+++ b/tests/datasets/test_image_transforms.py
@@ -33,7 +33,7 @@ from lerobot.scripts.visualize_image_transforms import (
save_all_transforms,
save_each_transform,
)
-from tests.scripts.save_image_transforms_to_safetensors import ARTIFACT_DIR
+from tests.artifacts.image_transforms.save_image_transforms_to_safetensors import ARTIFACT_DIR
from tests.utils import require_x86_64_kernel
diff --git a/tests/test_image_writer.py b/tests/datasets/test_image_writer.py
similarity index 95%
rename from tests/test_image_writer.py
rename to tests/datasets/test_image_writer.py
index c7fc11f2..802fe0d3 100644
--- a/tests/test_image_writer.py
+++ b/tests/datasets/test_image_writer.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
import queue
import time
from multiprocessing import queues
diff --git a/tests/test_online_buffer.py b/tests/datasets/test_online_buffer.py
similarity index 100%
rename from tests/test_online_buffer.py
rename to tests/datasets/test_online_buffer.py
diff --git a/tests/test_sampler.py b/tests/datasets/test_sampler.py
similarity index 100%
rename from tests/test_sampler.py
rename to tests/datasets/test_sampler.py
diff --git a/tests/lerobot/common/datasets/test_utils.py b/tests/datasets/test_utils.py
similarity index 65%
rename from tests/lerobot/common/datasets/test_utils.py
rename to tests/datasets/test_utils.py
index f484e1ae..0d02218a 100644
--- a/tests/lerobot/common/datasets/test_utils.py
+++ b/tests/datasets/test_utils.py
@@ -14,9 +14,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import torch
+from datasets import Dataset
from huggingface_hub import DatasetCard
-from lerobot.common.datasets.utils import create_lerobot_dataset_card
+from lerobot.common.datasets.push_dataset_to_hub.utils import calculate_episode_data_index
+from lerobot.common.datasets.utils import create_lerobot_dataset_card, hf_transform_to_torch
def test_default_parameters():
@@ -36,3 +39,17 @@ def test_with_tags():
tags = ["tag1", "tag2"]
card = create_lerobot_dataset_card(tags=tags)
assert card.data.tags == ["LeRobot", "tag1", "tag2"]
+
+
+def test_calculate_episode_data_index():
+ dataset = Dataset.from_dict(
+ {
+ "timestamp": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
+ "index": [0, 1, 2, 3, 4, 5],
+ "episode_index": [0, 0, 1, 2, 2, 2],
+ },
+ )
+ dataset.set_transform(hf_transform_to_torch)
+ episode_data_index = calculate_episode_data_index(dataset)
+ assert torch.equal(episode_data_index["from"], torch.tensor([0, 2, 3]))
+ assert torch.equal(episode_data_index["to"], torch.tensor([2, 3, 6]))
diff --git a/tests/test_visualize_dataset.py b/tests/datasets/test_visualize_dataset.py
similarity index 100%
rename from tests/test_visualize_dataset.py
rename to tests/datasets/test_visualize_dataset.py
diff --git a/tests/test_envs.py b/tests/envs/test_envs.py
similarity index 98%
rename from tests/test_envs.py
rename to tests/envs/test_envs.py
index c7c384db..b318abb4 100644
--- a/tests/test_envs.py
+++ b/tests/envs/test_envs.py
@@ -23,8 +23,7 @@ from gymnasium.utils.env_checker import check_env
import lerobot
from lerobot.common.envs.factory import make_env, make_env_config
from lerobot.common.envs.utils import preprocess_observation
-
-from .utils import require_env
+from tests.utils import require_env
OBS_TYPES = ["state", "pixels", "pixels_agent_pos"]
diff --git a/tests/test_examples.py b/tests/examples/test_examples.py
similarity index 100%
rename from tests/test_examples.py
rename to tests/examples/test_examples.py
diff --git a/tests/fixtures/constants.py b/tests/fixtures/constants.py
index 3201dcf2..5e5c762c 100644
--- a/tests/fixtures/constants.py
+++ b/tests/fixtures/constants.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from lerobot.common.constants import HF_LEROBOT_HOME
LEROBOT_TEST_DIR = HF_LEROBOT_HOME / "_testing"
diff --git a/tests/fixtures/dataset_factories.py b/tests/fixtures/dataset_factories.py
index 2259e0e6..531977da 100644
--- a/tests/fixtures/dataset_factories.py
+++ b/tests/fixtures/dataset_factories.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
import random
from functools import partial
from pathlib import Path
diff --git a/tests/fixtures/files.py b/tests/fixtures/files.py
index 4ef12e49..678d1f38 100644
--- a/tests/fixtures/files.py
+++ b/tests/fixtures/files.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
import json
from pathlib import Path
diff --git a/tests/fixtures/hub.py b/tests/fixtures/hub.py
index ae309cb4..aa2768e4 100644
--- a/tests/fixtures/hub.py
+++ b/tests/fixtures/hub.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from pathlib import Path
import datasets
diff --git a/tests/fixtures/optimizers.py b/tests/fixtures/optimizers.py
index 1a9b9d11..65488566 100644
--- a/tests/fixtures/optimizers.py
+++ b/tests/fixtures/optimizers.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
import pytest
import torch
diff --git a/tests/mock_dynamixel_sdk.py b/tests/motors/mock_dynamixel_sdk.py
similarity index 83%
rename from tests/mock_dynamixel_sdk.py
rename to tests/motors/mock_dynamixel_sdk.py
index a790dff0..ee399f96 100644
--- a/tests/mock_dynamixel_sdk.py
+++ b/tests/motors/mock_dynamixel_sdk.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
"""Mocked classes and functions from dynamixel_sdk to allow for continuous integration
and testing code logic that requires hardware and devices (e.g. robot arms, cameras)
diff --git a/tests/mock_scservo_sdk.py b/tests/motors/mock_scservo_sdk.py
similarity index 85%
rename from tests/mock_scservo_sdk.py
rename to tests/motors/mock_scservo_sdk.py
index ca9233b0..37f6d0d5 100644
--- a/tests/mock_scservo_sdk.py
+++ b/tests/motors/mock_scservo_sdk.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
"""Mocked classes and functions from dynamixel_sdk to allow for continuous integration
and testing code logic that requires hardware and devices (e.g. robot arms, cameras)
diff --git a/tests/test_motors.py b/tests/motors/test_motors.py
similarity index 88%
rename from tests/test_motors.py
rename to tests/motors/test_motors.py
index 75793636..da7a5c54 100644
--- a/tests/test_motors.py
+++ b/tests/motors/test_motors.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
"""
Tests for physical motors and their mocked versions.
If the physical motors are not connected to the computer, or not working,
diff --git a/tests/test_optimizers.py b/tests/optim/test_optimizers.py
similarity index 67%
rename from tests/test_optimizers.py
rename to tests/optim/test_optimizers.py
index cf5c5b18..997e14fe 100644
--- a/tests/test_optimizers.py
+++ b/tests/optim/test_optimizers.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
import pytest
import torch
diff --git a/tests/test_schedulers.py b/tests/optim/test_schedulers.py
similarity index 80%
rename from tests/test_schedulers.py
rename to tests/optim/test_schedulers.py
index e871fee1..17637663 100644
--- a/tests/test_schedulers.py
+++ b/tests/optim/test_schedulers.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from torch.optim.lr_scheduler import LambdaLR
from lerobot.common.constants import SCHEDULER_STATE
diff --git a/tests/test_policies.py b/tests/policies/test_policies.py
similarity index 96%
rename from tests/test_policies.py
rename to tests/policies/test_policies.py
index 9dab6176..197aa732 100644
--- a/tests/test_policies.py
+++ b/tests/policies/test_policies.py
@@ -40,7 +40,7 @@ from lerobot.common.utils.random_utils import seeded_context
from lerobot.configs.default import DatasetConfig
from lerobot.configs.train import TrainPipelineConfig
from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature
-from tests.scripts.save_policy_to_safetensors import get_policy_stats
+from tests.artifacts.policies.save_policy_to_safetensors import get_policy_stats
from tests.utils import DEVICE, require_cpu, require_env, require_x86_64_kernel
@@ -143,12 +143,11 @@ def test_policy(ds_repo_id, env_name, env_kwargs, policy_name, policy_kwargs):
dataset=DatasetConfig(repo_id=ds_repo_id, episodes=[0]),
policy=make_policy_config(policy_name, **policy_kwargs),
env=make_env_config(env_name, **env_kwargs),
- device=DEVICE,
)
# Check that we can make the policy object.
dataset = make_dataset(train_cfg)
- policy = make_policy(train_cfg.policy, ds_meta=dataset.meta, device=DEVICE)
+ policy = make_policy(train_cfg.policy, ds_meta=dataset.meta)
assert isinstance(policy, PreTrainedPolicy)
# Check that we run select_actions and get the appropriate output.
@@ -214,7 +213,6 @@ def test_act_backbone_lr():
# TODO(rcadene, aliberts): remove dataset download
dataset=DatasetConfig(repo_id="lerobot/aloha_sim_insertion_scripted", episodes=[0]),
policy=make_policy_config("act", optimizer_lr=0.01, optimizer_lr_backbone=0.001),
- device=DEVICE,
)
cfg.validate() # Needed for auto-setting some parameters
@@ -222,7 +220,7 @@ def test_act_backbone_lr():
assert cfg.policy.optimizer_lr_backbone == 0.001
dataset = make_dataset(cfg)
- policy = make_policy(cfg.policy, device=DEVICE, ds_meta=dataset.meta)
+ policy = make_policy(cfg.policy, ds_meta=dataset.meta)
optimizer, _ = make_optimizer_and_scheduler(cfg, policy)
assert len(optimizer.param_groups) == 2
assert optimizer.param_groups[0]["lr"] == cfg.policy.optimizer_lr
@@ -254,10 +252,11 @@ def test_save_and_load_pretrained(dummy_dataset_metadata, tmp_path, policy_name:
key: ft for key, ft in features.items() if key not in policy_cfg.output_features
}
policy = policy_cls(policy_cfg)
+ policy.to(policy_cfg.device)
save_dir = tmp_path / f"test_save_and_load_pretrained_{policy_cls.__name__}"
policy.save_pretrained(save_dir)
- policy_ = policy_cls.from_pretrained(save_dir, config=policy_cfg)
- assert all(torch.equal(p, p_) for p, p_ in zip(policy.parameters(), policy_.parameters(), strict=True))
+ loaded_policy = policy_cls.from_pretrained(save_dir, config=policy_cfg)
+ torch.testing.assert_close(list(policy.parameters()), list(loaded_policy.parameters()), rtol=0, atol=0)
@pytest.mark.parametrize("insert_temporal_dim", [False, True])
@@ -369,7 +368,7 @@ def test_normalize(insert_temporal_dim):
# was changed to true. For some reason, tests would pass locally, but not in CI. So here we override
# to test with `policy.use_mpc=false`.
("lerobot/xarm_lift_medium", "tdmpc", {"use_mpc": False}, "use_policy"),
- ("lerobot/xarm_lift_medium", "tdmpc", {"use_mpc": True}, "use_mpc"),
+ # ("lerobot/xarm_lift_medium", "tdmpc", {"use_mpc": True}, "use_mpc"),
# TODO(rcadene): the diffusion model was normalizing the image with mean=0.5 std=0.5, which is a hack that isn't really
# supposed to normalize the image at all. In our current codebase we don't normalize at all. But there is still a minor
# difference that fails the test. However, when normalizing the image with 0.5 0.5 in the current codebase, the test passes.
@@ -408,12 +407,10 @@ def test_backward_compatibility(ds_repo_id: str, policy_name: str, policy_kwargs
should be updated.
4. Check that this test now passes.
5. Remember to restore `tests/scripts/save_policy_to_safetensors.py` to its original state.
- 6. Remember to stage and commit the resulting changes to `tests/data`.
+ 6. Remember to stage and commit the resulting changes to `tests/artifacts`.
"""
ds_name = ds_repo_id.split("/")[-1]
- artifact_dir = (
- Path("tests/data/save_policy_to_safetensors") / f"{ds_name}_{policy_name}_{file_name_extra}"
- )
+ artifact_dir = Path("tests/artifacts/policies") / f"{ds_name}_{policy_name}_{file_name_extra}"
saved_output_dict = load_file(artifact_dir / "output_dict.safetensors")
saved_grad_stats = load_file(artifact_dir / "grad_stats.safetensors")
saved_param_stats = load_file(artifact_dir / "param_stats.safetensors")
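Two patterns recur in the hunks above: make_policy and TrainPipelineConfig no longer receive device=DEVICE (the policy is moved with policy.to(policy_cfg.device), so the device now comes from the policy config), and the saved/loaded parameter check uses torch.testing.assert_close with rtol=0 and atol=0, which still requires exact equality but reports element-wise mismatches on failure instead of a bare assertion error. A small usage sketch of that comparison pattern (tensor values are illustrative only):

    import torch

    reference = [torch.tensor([1.0, 2.0, 3.0]), torch.arange(4, dtype=torch.float32)]
    reloaded = [t.clone() for t in reference]

    # Zero tolerances make assert_close an exact-equality check while keeping
    # its detailed error message (dtype, shape, and which elements differ).
    torch.testing.assert_close(reference, reloaded, rtol=0, atol=0)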
diff --git a/tests/test_control_robot.py b/tests/robots/test_control_robot.py
similarity index 94%
rename from tests/test_control_robot.py
rename to tests/robots/test_control_robot.py
index 1796291f..61d1caad 100644
--- a/tests/test_control_robot.py
+++ b/tests/robots/test_control_robot.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
"""
Tests for physical robots and their mocked versions.
If the physical robots are not connected to the computer, or not working,
@@ -38,8 +51,8 @@ from lerobot.common.robot_devices.control_configs import (
)
from lerobot.configs.policies import PreTrainedConfig
from lerobot.scripts.control_robot import calibrate, record, replay, teleoperate
-from tests.test_robots import make_robot
-from tests.utils import DEVICE, TEST_ROBOT_TYPES, mock_calibration_dir, require_robot
+from tests.robots.test_robots import make_robot
+from tests.utils import TEST_ROBOT_TYPES, mock_calibration_dir, require_robot
@pytest.mark.parametrize("robot_type, mock", TEST_ROBOT_TYPES)
@@ -171,7 +184,7 @@ def test_record_and_replay_and_policy(tmp_path, request, robot_type, mock):
replay(robot, replay_cfg)
policy_cfg = ACTConfig()
- policy = make_policy(policy_cfg, ds_meta=dataset.meta, device=DEVICE)
+ policy = make_policy(policy_cfg, ds_meta=dataset.meta)
out_dir = tmp_path / "logger"
@@ -216,8 +229,6 @@ def test_record_and_replay_and_policy(tmp_path, request, robot_type, mock):
display_cameras=False,
play_sounds=False,
num_image_writer_processes=num_image_writer_processes,
- device=DEVICE,
- use_amp=False,
)
rec_eval_cfg.policy = PreTrainedConfig.from_pretrained(pretrained_policy_path)
diff --git a/tests/test_robots.py b/tests/robots/test_robots.py
similarity index 89%
rename from tests/test_robots.py
rename to tests/robots/test_robots.py
index c5734a4c..71343eba 100644
--- a/tests/test_robots.py
+++ b/tests/robots/test_robots.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
"""
Tests for physical robots and their mocked versions.
If the physical robots are not connected to the computer, or not working,
diff --git a/tests/test_utils.py b/tests/test_utils.py
deleted file mode 100644
index b2f14694..00000000
--- a/tests/test_utils.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import torch
-from datasets import Dataset
-
-from lerobot.common.datasets.push_dataset_to_hub.utils import calculate_episode_data_index
-from lerobot.common.datasets.utils import (
- hf_transform_to_torch,
-)
-
-
-def test_calculate_episode_data_index():
- dataset = Dataset.from_dict(
- {
- "timestamp": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
- "index": [0, 1, 2, 3, 4, 5],
- "episode_index": [0, 0, 1, 2, 2, 2],
- },
- )
- dataset.set_transform(hf_transform_to_torch)
- episode_data_index = calculate_episode_data_index(dataset)
- assert torch.equal(episode_data_index["from"], torch.tensor([0, 2, 3]))
- assert torch.equal(episode_data_index["to"], torch.tensor([2, 3, 6]))
diff --git a/tests/test_io_utils.py b/tests/utils/test_io_utils.py
similarity index 77%
rename from tests/test_io_utils.py
rename to tests/utils/test_io_utils.py
index d14f7adc..c1b776db 100644
--- a/tests/test_io_utils.py
+++ b/tests/utils/test_io_utils.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
import json
from pathlib import Path
from typing import Any
diff --git a/tests/test_logging_utils.py b/tests/utils/test_logging_utils.py
similarity index 84%
rename from tests/test_logging_utils.py
rename to tests/utils/test_logging_utils.py
index 72385496..1ba1829e 100644
--- a/tests/test_logging_utils.py
+++ b/tests/utils/test_logging_utils.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
import pytest
from lerobot.common.utils.logging_utils import AverageMeter, MetricsTracker
diff --git a/tests/test_random_utils.py b/tests/utils/test_random_utils.py
similarity index 83%
rename from tests/test_random_utils.py
rename to tests/utils/test_random_utils.py
index 8eee2b68..daf08a89 100644
--- a/tests/test_random_utils.py
+++ b/tests/utils/test_random_utils.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
import random
import numpy as np
diff --git a/tests/test_train_utils.py b/tests/utils/test_train_utils.py
similarity index 81%
rename from tests/test_train_utils.py
rename to tests/utils/test_train_utils.py
index d6ed0063..b78f6e49 100644
--- a/tests/test_train_utils.py
+++ b/tests/utils/test_train_utils.py
@@ -1,3 +1,16 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from pathlib import Path
from unittest.mock import Mock, patch