# Workflow name shown in the GitHub Actions UI.
name: Tests

# Run for PRs targeting main and for pushes to main.
# `labeled` is included in the PR event types so that adding a label
# (the 'CI' label gate in the job below) re-triggers the workflow.
on:
  pull_request:
    branches:
      - main
    types: [opened, synchronize, reopened, labeled]
  push:
    branches:
      - main

jobs:
  tests:
    # Run on every push to main, and on PRs only when they carry the 'CI' label.
    # NOTE: do not mix `${{ }}` markers with literal text (e.g.
    # `${{ A }} || ${{ B }}`): GitHub substitutes each marker first and the
    # resulting non-empty string ("false ||\nfalse") is always truthy, so the
    # job would run unconditionally. A single bare expression is evaluated
    # as a whole and gates the job correctly.
    if: |
      github.event_name == 'push' ||
      (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'CI'))
    runs-on: ubuntu-latest
    env:
      POETRY_VERSION: 1.8.2
      DATA_DIR: tests/data
      # Headless MuJoCo rendering backend; requires libegl (installed below).
      MUJOCO_GL: egl
    steps:
      #----------------------------------------------
      #       check-out repo and set-up python
      #----------------------------------------------
      # Clone the repository; `lfs: true` also fetches Git LFS objects
      # (presumably large test assets under DATA_DIR — confirm).
      - name: Check out repository
        uses: actions/checkout@v4
        with:
          lfs: true

      # Install the Python toolchain. The step id exposes
      # steps.setup-python.outputs.python-version, used in cache keys below.
      # '3.10' is quoted so YAML does not parse it as the float 3.1.
      - name: Set up python
        id: setup-python
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'

      #----------------------------------------------
      #         install & configure poetry
      #----------------------------------------------
      # Restore the Poetry binary installation (~/.local), keyed by version.
      # cache@v4: v3 is deprecated; v4 also matches checkout@v4/setup-python@v5.
      - name: Load cached Poetry installation
        id: restore-poetry-cache
        uses: actions/cache/restore@v4
        with:
          path: ~/.local
          key: poetry-${{ env.POETRY_VERSION }}

      # Install Poetry only on a cache miss.
      - name: Install Poetry
        if: steps.restore-poetry-cache.outputs.cache-hit != 'true'
        uses: snok/install-poetry@v1
        with:
          version: ${{ env.POETRY_VERSION }}
          virtualenvs-create: true
          installer-parallel: true

      # Save the cache only from main, so PR branches don't churn cache quota.
      - name: Save cached Poetry installation
        if: |
          steps.restore-poetry-cache.outputs.cache-hit != 'true' &&
          github.ref_name == 'main'
        id: save-poetry-cache
        uses: actions/cache/save@v4
        with:
          path: ~/.local
          key: poetry-${{ env.POETRY_VERSION }}

      # Keep the virtualenv inside the project (.venv) so it can be cached
      # and activated with `source .venv/bin/activate` in later steps.
      - name: Configure Poetry
        run: poetry config virtualenvs.in-project true

      #----------------------------------------------
      #           install dependencies
      #----------------------------------------------
      # TODO(aliberts): move to gpu runners
      # Overwrite pyproject.toml/poetry.lock with the CPU-only variants so the
      # CPU runner does not resolve GPU wheels.
      - name: Select cpu dependencies  # HACK
        run: cp -t . .github/poetry/cpu/pyproject.toml .github/poetry/cpu/poetry.lock

      # Restore the project virtualenv, keyed on Python version, Poetry
      # version, and the (CPU) lockfile hash.
      # cache@v4: v3 is deprecated; restore/save bumped together in this step group.
      - name: Load cached venv
        id: restore-dependencies-cache
        uses: actions/cache/restore@v4
        with:
          path: .venv
          key: venv-${{ steps.setup-python.outputs.python-version }}-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }}

      - name: Install dependencies
        if: steps.restore-dependencies-cache.outputs.cache-hit != 'true'
        # NOTE(review): '~' is not shell-expanded inside `env:` values, so
        # consumers of TMPDIR/TEMP/TMP may see the literal string '~/tmp'
        # rather than the directory created below — confirm this redirection
        # takes effect, or switch to ${{ runner.temp }}.
        env:
          TMPDIR: ~/tmp
          TEMP: ~/tmp
          TMP: ~/tmp
        run: |
          mkdir -p ~/tmp
          poetry install --no-interaction --no-root

      # Save the venv cache only from main (same policy as the Poetry cache).
      - name: Save cached venv
        if: |
          steps.restore-dependencies-cache.outputs.cache-hit != 'true' &&
          github.ref_name == 'main'
        id: save-dependencies-cache
        uses: actions/cache/save@v4
        with:
          path: .venv
          key: venv-${{ steps.setup-python.outputs.python-version }}-${{ env.POETRY_VERSION }}-${{ hashFiles('**/poetry.lock') }}

      # EGL development libraries so MuJoCo can render headlessly (MUJOCO_GL=egl).
      - name: Install libegl1-mesa-dev (to use MUJOCO_GL=egl)
        run: sudo apt-get update && sudo apt-get install -y libegl1-mesa-dev

      #----------------------------------------------
      #             install project
      #----------------------------------------------
      # Install the project package itself into the venv
      # (dependencies were installed with --no-root above).
      - name: Install project
        run: poetry install --no-interaction

      #----------------------------------------------
      #            run tests & coverage
      #----------------------------------------------
      # Run the unit-test suite on CPU and emit an XML coverage report
      # (coverage.xml — consumed by the commented-out Codecov step).
      - name: Run tests
        env:
          LEROBOT_TESTS_DEVICE: cpu
        run: |
          source .venv/bin/activate
          pytest --cov=./lerobot --cov-report=xml tests

      #   TODO(aliberts): Link with HF Codecov account
      # - name: Upload coverage reports to Codecov with GitHub Action
      #   uses: codecov/codecov-action@v4
      #   with:
      #     files: ./coverage.xml
      #     verbose: true

      #----------------------------------------------
      #            run end-to-end tests
      #----------------------------------------------
      # Smoke-test training: 2 offline steps of ACT on ALOHA on CPU,
      # saving a checkpoint at step 2 for the eval step below.
      - name: Test train ACT on ALOHA end-to-end
        run: |
          source .venv/bin/activate
          python lerobot/scripts/train.py \
            policy=act \
            env=aloha \
            wandb.enable=False \
            offline_steps=2 \
            online_steps=0 \
            device=cpu \
            save_model=true \
            save_freq=2 \
            horizon=20 \
            policy.batch_size=2 \
            hydra.run.dir=tests/outputs/act/

      # Evaluate the checkpoint written by the training step above
      # (1 episode, 8-step horizon) using the saved hydra config.
      - name: Test eval ACT on ALOHA end-to-end
        run: |
          source .venv/bin/activate
          python lerobot/scripts/eval.py \
            --config tests/outputs/act/.hydra/config.yaml \
            eval_episodes=1 \
            env.episode_length=8 \
            device=cpu \
            policy.pretrained_model_path=tests/outputs/act/models/2.pt

      # TODO(aliberts): This takes ~2mn to run, needs to be improved
      # - name: Test eval ACT on ALOHA end-to-end (policy is None)
      #   run: |
      #     source .venv/bin/activate
      #     python lerobot/scripts/eval.py \
      #       --config lerobot/configs/default.yaml \
      #       policy=act \
      #       env=aloha \
      #       eval_episodes=1 \
      #       device=cpu

      # Smoke-test training: 2 offline steps of Diffusion Policy on PushT
      # on CPU, saving a checkpoint at step 2 for the eval steps below.
      - name: Test train Diffusion on PushT end-to-end
        run: |
          source .venv/bin/activate
          python lerobot/scripts/train.py \
            policy=diffusion \
            env=pusht \
            wandb.enable=False \
            offline_steps=2 \
            online_steps=0 \
            device=cpu \
            save_model=true \
            save_freq=2 \
            hydra.run.dir=tests/outputs/diffusion/

      # Evaluate the checkpoint written by the training step above.
      - name: Test eval Diffusion on PushT end-to-end
        run: |
          source .venv/bin/activate
          python lerobot/scripts/eval.py \
            --config tests/outputs/diffusion/.hydra/config.yaml \
            eval_episodes=1 \
            env.episode_length=8 \
            device=cpu \
            policy.pretrained_model_path=tests/outputs/diffusion/models/2.pt

      # Evaluate from the default config with no pretrained weights
      # (exercises the policy=None / random-init eval path).
      - name: Test eval Diffusion on PushT end-to-end (policy is None)
        run: |
          source .venv/bin/activate
          python lerobot/scripts/eval.py \
            --config lerobot/configs/default.yaml \
            policy=diffusion  \
            env=pusht \
            eval_episodes=1 \
            device=cpu

      # Smoke-test training: 1 offline + 1 online step of TDMPC on Simxarm
      # on CPU, saving a checkpoint at step 2 for the eval steps below.
      - name: Test train TDMPC on Simxarm end-to-end
        run: |
          source .venv/bin/activate
          python lerobot/scripts/train.py \
            policy=tdmpc \
            env=simxarm \
            wandb.enable=False \
            offline_steps=1 \
            online_steps=1 \
            device=cpu \
            save_model=true \
            save_freq=2 \
            hydra.run.dir=tests/outputs/tdmpc/

      # Evaluate the checkpoint written by the training step above.
      - name: Test eval TDMPC on Simxarm end-to-end
        run: |
          source .venv/bin/activate
          python lerobot/scripts/eval.py \
            --config tests/outputs/tdmpc/.hydra/config.yaml \
            eval_episodes=1 \
            env.episode_length=8 \
            device=cpu \
            policy.pretrained_model_path=tests/outputs/tdmpc/models/2.pt

      # Evaluate from the default config with no pretrained weights.
      # (Step name typo fixed: "TDPMC" -> "TDMPC".)
      - name: Test eval TDMPC on Simxarm end-to-end (policy is None)
        run: |
          source .venv/bin/activate
          python lerobot/scripts/eval.py \
            --config lerobot/configs/default.yaml \
            policy=tdmpc \
            env=simxarm \
            eval_episodes=1 \
            device=cpu