diff --git a/lerobot/common/envs/abstract.py b/lerobot/common/envs/abstract.py
index 0754fb76..8d1a09de 100644
--- a/lerobot/common/envs/abstract.py
+++ b/lerobot/common/envs/abstract.py
@@ -27,7 +27,6 @@ class AbstractEnv(EnvBase):
         self.image_size = image_size
         self.num_prev_obs = num_prev_obs
         self.num_prev_action = num_prev_action
-        self._rendering_hooks = []
 
         if pixels_only:
             assert from_pixels
@@ -45,16 +44,6 @@ class AbstractEnv(EnvBase):
             raise NotImplementedError()
             # self._prev_action_queue = deque(maxlen=self.num_prev_action)
 
-    def register_rendering_hook(self, func):
-        self._rendering_hooks.append(func)
-
-    def call_rendering_hooks(self):
-        for func in self._rendering_hooks:
-            func(self)
-
-    def reset_rendering_hooks(self):
-        self._rendering_hooks = []
-
     @abc.abstractmethod
     def render(self, mode="rgb_array", width=640, height=480):
         raise NotImplementedError()
diff --git a/lerobot/common/envs/aloha/env.py b/lerobot/common/envs/aloha/env.py
index 6f8fded1..e09564fb 100644
--- a/lerobot/common/envs/aloha/env.py
+++ b/lerobot/common/envs/aloha/env.py
@@ -164,7 +164,6 @@ class AlohaEnv(AbstractEnv):
             batch_size=[],
         )
 
-        self.call_rendering_hooks()
         return td
 
     def _step(self, tensordict: TensorDict):
@@ -189,8 +188,6 @@ class AlohaEnv(AbstractEnv):
             stacked_obs["state"] = torch.stack(list(self._prev_obs_state_queue))
             obs = stacked_obs
 
-        self.call_rendering_hooks()
-
         td = TensorDict(
             {
                 "observation": TensorDict(obs, batch_size=[]),
diff --git a/lerobot/common/envs/pusht/env.py b/lerobot/common/envs/pusht/env.py
index aadf626c..f440d443 100644
--- a/lerobot/common/envs/pusht/env.py
+++ b/lerobot/common/envs/pusht/env.py
@@ -116,7 +116,6 @@ class PushtEnv(AbstractEnv):
             batch_size=[],
         )
 
-        self.call_rendering_hooks()
         return td
 
     def _step(self, tensordict: TensorDict):
@@ -139,8 +138,6 @@ class PushtEnv(AbstractEnv):
             stacked_obs["state"] = torch.stack(list(self._prev_obs_state_queue))
             obs = stacked_obs
 
-        self.call_rendering_hooks()
-
         td = TensorDict(
             {
                 "observation": TensorDict(obs, batch_size=[]),
diff --git a/lerobot/common/envs/simxarm.py b/lerobot/common/envs/simxarm.py
index d0612625..eac3666d 100644
--- a/lerobot/common/envs/simxarm.py
+++ b/lerobot/common/envs/simxarm.py
@@ -118,7 +118,6 @@ class SimxarmEnv(AbstractEnv):
         else:
             raise NotImplementedError()
 
-        self.call_rendering_hooks()
         return td
 
     def _step(self, tensordict: TensorDict):
@@ -152,8 +151,6 @@ class SimxarmEnv(AbstractEnv):
             stacked_obs["state"] = torch.stack(list(self._prev_obs_state_queue))
             obs = stacked_obs
 
-        self.call_rendering_hooks()
-
         td = TensorDict(
             {
                 "observation": self._format_raw_obs(raw_obs),
diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py
index 7127b24d..e98df19c 100644
--- a/lerobot/scripts/eval.py
+++ b/lerobot/scripts/eval.py
@@ -101,8 +101,6 @@ def eval_policy(
         if return_first_video and i == 0:
             first_video = batch_stacked_frames[0].transpose(0, 3, 1, 2)
 
-    env.reset_rendering_hooks()
-
     for thread in threads:
         thread.join()