merge with main

parent fe284bbbd7
commit 7132790faa

README.md | 16
@@ -98,14 +98,18 @@ conda create -y -n lerobot python=3.10
 conda activate lerobot
 ```

-Install 🤗 LeRobot:
+When using `miniconda`, if you don't have `ffmpeg` in your environment:
 ```bash
-pip install -e .
+conda install ffmpeg
 ```

-> **NOTE:** Depending on your platform, If you encounter any build errors during this step
-you may need to install `cmake` and `build-essential` for building some of our dependencies.
-On linux: `sudo apt-get install cmake build-essential`
+Install 🤗 LeRobot:
+```bash
+pip install --no-binary=av -e .
+```
+
+> **NOTE:** If you encounter build errors, you may need to install additional dependencies (`cmake`, `build-essential`, and `ffmpeg libs`). On Linux, run:
+`sudo apt-get install cmake build-essential python-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev pkg-config`. For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg)

 For simulations, 🤗 LeRobot comes with gymnasium environments that can be installed as extras:
 - [aloha](https://github.com/huggingface/gym-aloha)
@@ -114,7 +118,7 @@ For simulations, 🤗 LeRobot comes with gymnasium environments that can be inst

 For instance, to install 🤗 LeRobot with aloha and pusht, use:
 ```bash
-pip install -e ".[aloha, pusht]"
+pip install --no-binary=av -e ".[aloha, pusht]"
 ```

 To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with
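The two README hunks above switch the editable install to `pip install --no-binary=av`, which makes pip build PyAV from source against a system FFmpeg rather than pulling a prebuilt wheel; that is why the NOTE now lists the `libav*-dev` packages. Below is a minimal post-install sanity check, assuming it runs inside the activated `lerobot` environment (the snippet is illustrative, not part of the repo):

```python
# Minimal sanity check after `pip install --no-binary=av -e .` (illustrative,
# not part of the repository): confirm an ffmpeg binary is on PATH and that
# PyAV was built and imports.
import shutil

import av

ffmpeg = shutil.which("ffmpeg")
assert ffmpeg is not None, "ffmpeg not found; try `conda install ffmpeg`"
print("ffmpeg binary:", ffmpeg)
print("PyAV version:", av.__version__)
```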
@@ -257,6 +257,7 @@ def encode_video_frames(
 ) -> None:
     """More info on ffmpeg arguments tuning on `benchmark/video/README.md`"""
     video_path = Path(video_path)
+    imgs_dir = Path(imgs_dir)
     video_path.parent.mkdir(parents=True, exist_ok=True)

     ffmpeg_args = OrderedDict(
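The hunk above adds `imgs_dir = Path(imgs_dir)` so the frames directory, like `video_path`, can be passed as a plain string. A hedged sketch of the surrounding pattern follows; it is not the library's actual implementation, and the frame-name pattern, fps, and codec defaults are assumptions:

```python
# Hedged sketch of the pattern the hunk belongs to: normalize both paths,
# collect ffmpeg flags in an OrderedDict, then invoke the ffmpeg CLI.
import subprocess
from collections import OrderedDict
from pathlib import Path


def encode_video_frames_sketch(imgs_dir, video_path, fps=30, vcodec="libx264"):
    video_path = Path(video_path)
    imgs_dir = Path(imgs_dir)  # the line added in the hunk above
    video_path.parent.mkdir(parents=True, exist_ok=True)

    # Assumed flag set and frame-name pattern, purely for illustration.
    ffmpeg_args = OrderedDict(
        [
            ("-f", "image2"),
            ("-r", str(fps)),
            ("-i", str(imgs_dir / "frame_%06d.png")),
            ("-vcodec", vcodec),
        ]
    )
    cmd = ["ffmpeg"] + [x for pair in ffmpeg_args.items() for x in pair] + [str(video_path)]
    subprocess.run(cmd, check=True)
```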
@@ -24,7 +24,7 @@ Designed by Physical Intelligence. Ported from Jax by Hugging Face.

 Install pi0 extra dependencies:
 ```bash
-pip install -e ".[pi0]"
+pip install --no-binary=av -e ".[pi0]"
 ```

 Example of finetuning the pi0 pretrained model (`pi0_base` in `openpi`):
@@ -313,7 +313,7 @@ class PI0Policy(PreTrainedPolicy):
         state = self.prepare_state(batch)
         lang_tokens, lang_masks = self.prepare_language(batch)
         actions = self.prepare_action(batch)
-        actions_is_pad = batch.get("actions_is_pad")
+        actions_is_pad = batch.get("action_is_pad")

         loss_dict = {}
         losses = self.model.forward(images, img_masks, lang_tokens, lang_masks, state, actions, noise, time)
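The key fix above matters because `dict.get` fails silently: with the old key, `actions_is_pad` came back as `None` when batches carried `action_is_pad`, so padded timesteps were never masked out of the loss. An illustrative sketch with assumed shapes and masking scheme:

```python
# Illustrative only: show how a per-timestep padding mask removes padded
# action-chunk steps from the loss. Shapes are assumptions.
import torch

losses = torch.rand(2, 10, 32)  # (batch, horizon, action_dim)
actions_is_pad = torch.zeros(2, 10, dtype=torch.bool)
actions_is_pad[:, 8:] = True  # pretend the last two chunk steps are padding

if actions_is_pad is not None:
    in_episode = ~actions_is_pad
    loss = losses[in_episode].mean()  # average only over real timesteps
else:
    loss = losses.mean()  # with the wrong key, this branch would always run
print(loss)
```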
@@ -90,6 +90,7 @@ class WandBLogger:
             # TODO(rcadene): split train and eval, and run async eval with job_type="eval"
             job_type="train_eval",
             resume="must" if cfg.resume else None,
+            mode=self.cfg.mode if self.cfg.mode in ["online", "offline", "disabled"] else "online",
         )
         print(colored("Logs will be synced with wandb.", "blue", attrs=["bold"]))
         logging.info(f"Track this run --> {colored(wandb.run.get_url(), 'yellow', attrs=['bold'])}")
@@ -48,6 +48,7 @@ class WandBConfig:
     entity: str | None = None
     notes: str | None = None
     run_id: str | None = None
+    mode: str | None = None  # Allowed values: 'online', 'offline' 'disabled'. Defaults to 'online'


 @dataclass
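The two wandb hunks above are two halves of one feature: `WandBConfig` gains a `mode` field and `WandBLogger` forwards it to `wandb.init`, falling back to `"online"` for unrecognized values. A hedged standalone sketch (the project name is a placeholder, not from the repo):

```python
# Standalone sketch of the new behaviour: any mode outside the allowed set
# falls back to "online". Uses "offline" here so it runs without credentials.
import wandb

cfg_mode = "offline"  # e.g. WandBConfig(mode="offline")
mode = cfg_mode if cfg_mode in ["online", "offline", "disabled"] else "online"
run = wandb.init(project="lerobot-example", job_type="train_eval", mode=mode)
run.finish()
```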
@@ -62,7 +62,7 @@ dependencies = [
     "omegaconf>=2.3.0",
     "opencv-python>=4.9.0",
     "packaging>=24.2",
-    "av>=12.0.5",
+    "av>=12.0.5,<13.0.0",
     "pymunk>=6.6.0",
     "pynput>=1.7.7",
     "pyzmq>=26.2.1",
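The pyproject hunk above caps `av` below 13.0.0. The range can be checked mechanically with `packaging`, which already appears in the dependency list:

```python
# Demonstrate the new version constraint on PyAV using packaging.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet(">=12.0.5,<13.0.0")
print(Version("12.3.0") in spec)  # True  - accepted PyAV release
print(Version("13.0.0") in spec)  # False - excluded by the new upper bound
```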