2024-01-29 20:49:30 +08:00
|
|
|
import pytest
|
Add Aloha env and ACT policy
WIP Aloha env tests pass
Rendering works (fps look fast tho? TODO action bounding is too wide [-1,1])
Update README
Copy past from act repo
Remove download.py add a WIP for Simxarm
Remove download.py add a WIP for Simxarm
Add act yaml (TODO: try train.py)
Training can runs (TODO: eval)
Add tasks without end_effector that are compatible with dataset, Eval can run (TODO: training and pretrained model)
Add AbstractEnv, Refactor AlohaEnv, Add rendering_hook in env, Minor modifications, (TODO: Refactor Pusht and Simxarm)
poetry lock
fix bug in compute_stats for action normalization
fix more bugs in normalization
fix training
fix import
PushtEnv inheriates AbstractEnv, Improve factory Normalization
Add _make_env to EnvAbstract
Add call_rendering_hooks to pusht env
SimxarmEnv inherites from AbstractEnv (NOT TESTED)
Add aloha tests artifacts + update pusht stats
fix image normalization: before env was in [0,1] but dataset in [0,255], and now both in [0,255]
Small fix on simxarm
Add next to obs
Add top camera to Aloha env (TODO: make it compatible with set of cameras)
Add top camera to Aloha env (TODO: make it compatible with set of cameras)
2024-03-08 17:47:39 +08:00
|
|
|
import torch
|
2024-03-31 23:05:25 +08:00
|
|
|
from lerobot.common.datasets.factory import make_dataset
|
2024-04-05 21:35:20 +08:00
|
|
|
import gymnasium as gym
|
|
|
|
from gymnasium.utils.env_checker import check_env
|
2024-01-29 20:49:30 +08:00
|
|
|
|
2024-02-20 22:22:16 +08:00
|
|
|
from lerobot.common.envs.factory import make_env
|
2024-03-28 02:33:48 +08:00
|
|
|
from lerobot.common.utils import init_hydra_config
|
2024-01-29 20:49:30 +08:00
|
|
|
|
2024-04-06 00:21:07 +08:00
|
|
|
from lerobot.common.envs.utils import preprocess_observation
|
|
|
|
|
|
|
|
# import dmc_aloha # noqa: F401
|
|
|
|
|
2024-03-28 02:33:48 +08:00
|
|
|
from .utils import DEVICE, DEFAULT_CONFIG_PATH
|
2024-02-25 18:50:23 +08:00
|
|
|
|
2024-01-29 20:49:30 +08:00
|
|
|
|
2024-04-06 00:21:07 +08:00
|
|
|
# def print_spec_rollout(env):
|
|
|
|
# print("observation_spec:", env.observation_spec)
|
|
|
|
# print("action_spec:", env.action_spec)
|
|
|
|
# print("reward_spec:", env.reward_spec)
|
|
|
|
# print("done_spec:", env.done_spec)
|
2024-01-29 20:49:30 +08:00
|
|
|
|
2024-04-06 00:21:07 +08:00
|
|
|
# td = env.reset()
|
|
|
|
# print("reset tensordict", td)
|
2024-01-29 20:49:30 +08:00
|
|
|
|
2024-04-06 00:21:07 +08:00
|
|
|
# td = env.rand_step(td)
|
|
|
|
# print("random step tensordict", td)
|
2024-01-29 20:49:30 +08:00
|
|
|
|
2024-04-06 00:21:07 +08:00
|
|
|
# def simple_rollout(steps=100):
|
|
|
|
# # preallocate:
|
|
|
|
# data = TensorDict({}, [steps])
|
|
|
|
# # reset
|
|
|
|
# _data = env.reset()
|
|
|
|
# for i in range(steps):
|
|
|
|
# _data["action"] = env.action_spec.rand()
|
|
|
|
# _data = env.step(_data)
|
|
|
|
# data[i] = _data
|
|
|
|
# _data = step_mdp(_data, keep_other=True)
|
|
|
|
# return data
|
2024-01-29 20:49:30 +08:00
|
|
|
|
2024-04-06 00:21:07 +08:00
|
|
|
# print("data from rollout:", simple_rollout(100))
|
2024-02-20 20:26:57 +08:00
|
|
|
|
|
|
|
|
2024-04-06 00:21:07 +08:00
|
|
|
@pytest.mark.skip("TODO")
@pytest.mark.parametrize(
    "task,from_pixels,pixels_only",
    [
        ("sim_insertion", True, False),
        ("sim_insertion", True, True),
        ("sim_transfer_cube", True, False),
        ("sim_transfer_cube", True, True),
    ],
)
def test_aloha(task, from_pixels, pixels_only):
    """Smoke-test the legacy AlohaEnv wrapper over both sim tasks.

    Skipped (TODO): relies on `AlohaEnv` and `check_env_specs`, which are not
    imported in this module — presumably leftovers from the torchrl-based
    implementation; confirm before re-enabling.
    """
    env = AlohaEnv(
        task,
        from_pixels=from_pixels,
        pixels_only=pixels_only,
        # Image size is only meaningful when rendering pixels.
        image_size=[3, 480, 640] if from_pixels else None,
    )
    # print_spec_rollout(env)
    check_env_specs(env)
|
|
|
|
|
|
|
|
|
2024-02-20 20:26:57 +08:00
|
|
|
@pytest.mark.parametrize(
    "env_task, obs_type",
    [
        ("XarmLift-v0", "state"),
        ("XarmLift-v0", "pixels"),
        ("XarmLift-v0", "pixels_agent_pos"),
        # TODO(aliberts): Add gym_xarm other tasks
    ],
)
def test_xarm(env_task, obs_type):
    """Validate the gym_xarm environment against the Gymnasium API checker."""
    # Importing registers the gym_xarm environments with gymnasium.
    import gym_xarm  # noqa: F401

    env = gym.make(f"gym_xarm/{env_task}", obs_type=obs_type)
    check_env(env)
|
2024-02-20 20:26:57 +08:00
|
|
|
|
|
|
|
|
2024-04-06 00:21:07 +08:00
|
|
|
|
2024-02-20 20:26:57 +08:00
|
|
|
@pytest.mark.parametrize(
    "env_task, obs_type",
    [
        ("PushTPixels-v0", "state"),
        ("PushTPixels-v0", "pixels"),
        ("PushTPixels-v0", "pixels_agent_pos"),
    ],
)
def test_pusht(env_task, obs_type):
    """Validate the gym_pusht environment against the Gymnasium API checker."""
    # Importing registers the gym_pusht environments with gymnasium.
    import gym_pusht  # noqa: F401

    env = gym.make(f"gym_pusht/{env_task}", obs_type=obs_type)
    check_env(env)
|
2024-02-20 22:22:16 +08:00
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize(
    "env_name",
    [
        "pusht",
        "simxarm",
        # "aloha",
    ],
)
def test_factory(env_name):
    """End-to-end check that env + dataset factories produce compatible data.

    Builds a hydra config for `env_name`, instantiates the dataset and the
    environment, resets the env, preprocesses the raw observation with the
    dataset's transform, and checks that every image observation comes out as
    float32 in [0, 1].
    """
    cfg = init_hydra_config(
        DEFAULT_CONFIG_PATH,
        overrides=[f"env={env_name}", f"device={DEVICE}"],
    )
    dataset = make_dataset(cfg)

    env = make_env(cfg)
    obs, info = env.reset()
    # Add a leading batch dimension of 1 to every observation entry, since
    # preprocess_observation expects batched inputs.
    obs = {key: obs[key][None, ...] for key in obs}
    obs = preprocess_observation(obs, transform=dataset.transform)
    for key in dataset.image_keys:
        img = obs[key]
        assert img.dtype == torch.float32
        # TODO(rcadene): we assume for now that image normalization takes place in the model
        assert img.max() <= 1.0
        assert img.min() >= 0.0
|