diff --git a/.dockerignore b/.dockerignore index 701807953..b8c1be153 100644 --- a/.dockerignore +++ b/.dockerignore @@ -65,7 +65,6 @@ htmlcov/ .nox/ .coverage .coverage.* -.cache nosetests.xml coverage.xml *.cover @@ -73,6 +72,11 @@ coverage.xml .hypothesis/ .pytest_cache/ +# Ignore .cache except calibration +.cache/* +!.cache/calibration/ +!.cache/calibration/** + # Translations *.mo *.pot diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 4063e395f..2e6fd4490 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -21,7 +21,7 @@ Provide a simple way for the reviewer to try out your changes. Examples: ```bash -DATA_DIR=tests/data pytest -sx tests/test_stuff.py::test_something +pytest -sx tests/test_stuff.py::test_something ``` ```bash python lerobot/scripts/train.py --some.option=true diff --git a/.github/workflows/nightly-tests.yml b/.github/workflows/nightly-tests.yml index f967533ae..bbee19a17 100644 --- a/.github/workflows/nightly-tests.yml +++ b/.github/workflows/nightly-tests.yml @@ -7,10 +7,8 @@ on: schedule: - cron: "0 2 * * *" -env: - DATA_DIR: tests/data +# env: # SLACK_API_TOKEN: ${{ secrets.SLACK_API_TOKEN }} - jobs: run_all_tests_cpu: name: CPU @@ -30,13 +28,9 @@ jobs: working-directory: /lerobot steps: - name: Tests - env: - DATA_DIR: tests/data run: pytest -v --cov=./lerobot --disable-warnings tests - name: Tests end-to-end - env: - DATA_DIR: tests/data run: make test-end-to-end diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 038b44582..5de071750 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -11,6 +11,7 @@ on: - ".github/**" - "poetry.lock" - "Makefile" + - ".cache/**" push: branches: - main @@ -21,13 +22,13 @@ on: - ".github/**" - "poetry.lock" - "Makefile" + - ".cache/**" jobs: pytest: name: Pytest runs-on: ubuntu-latest env: - DATA_DIR: tests/data MUJOCO_GL: egl steps: - uses: actions/checkout@v4 @@ -35,13 +36,17 @@ jobs: lfs: true # Ensure LFS files are pulled - name: Install apt dependencies - run: sudo apt-get update && sudo apt-get install -y libegl1-mesa-dev ffmpeg + # portaudio19-dev is needed to install pyaudio + run: | + sudo apt-get update && \ + sudo apt-get install -y libegl1-mesa-dev ffmpeg portaudio19-dev - name: Install poetry run: | pipx install poetry && poetry config virtualenvs.in-project true echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH + # TODO(rcadene, aliberts): python 3.12 seems to be used in the tests, not python 3.10 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: @@ -60,12 +65,10 @@ jobs: -W ignore::UserWarning:gymnasium.utils.env_checker:247 \ && rm -rf tests/outputs outputs - pytest-minimal: name: Pytest (minimal install) runs-on: ubuntu-latest env: - DATA_DIR: tests/data MUJOCO_GL: egl steps: - uses: actions/checkout@v4 @@ -80,6 +83,7 @@ jobs: pipx install poetry && poetry config virtualenvs.in-project true echo "${{ github.workspace }}/.venv/bin" >> $GITHUB_PATH + # TODO(rcadene, aliberts): python 3.12 seems to be used in the tests, not python 3.10 - name: Set up Python 3.10 uses: actions/setup-python@v5 with: @@ -97,12 +101,11 @@ jobs: -W ignore::UserWarning:gymnasium.utils.env_checker:247 \ && rm -rf tests/outputs outputs - + # TODO(aliberts, rcadene): redesign after v2 migration / removing hydra end-to-end: name: End-to-end runs-on: ubuntu-latest env: - DATA_DIR: tests/data MUJOCO_GL: egl steps: - uses: actions/checkout@v4 @@ -110,7 +113,10 @@ jobs: lfs: true # Ensure LFS files are pulled 
- name: Install apt dependencies - run: sudo apt-get update && sudo apt-get install -y libegl1-mesa-dev + # portaudio19-dev is needed to install pyaudio + run: | + sudo apt-get update && \ + sudo apt-get install -y libegl1-mesa-dev portaudio19-dev - name: Install poetry run: | diff --git a/.gitignore b/.gitignore index 0e203a394..dfd929b81 100644 --- a/.gitignore +++ b/.gitignore @@ -153,3 +153,6 @@ dmypy.json # Cython debug symbols cython_debug/ + +# slurm scripts +slurm/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 822197ba2..b8c198568 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -267,7 +267,7 @@ We use `pytest` in order to run the tests. From the root of the repository, here's how to run tests with `pytest` for the library: ```bash -DATA_DIR="tests/data" python -m pytest -sv ./tests +python -m pytest -sv ./tests ``` diff --git a/Makefile b/Makefile index f6517497e..a95d7614e 100644 --- a/Makefile +++ b/Makefile @@ -30,6 +30,8 @@ test-end-to-end: ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-eval ${MAKE} DEVICE=$(DEVICE) test-default-ete-eval ${MAKE} DEVICE=$(DEVICE) test-act-pusht-tutorial + ${MAKE} DEVICE=$(DEVICE) test-act-ete-train-accelerate-amp + ${MAKE} DEVICE=$(DEVICE) test-act-ete-eval-accelerate-amp test-act-ete-train: python lerobot/scripts/train.py \ @@ -188,3 +190,31 @@ test-act-pusht-tutorial: training.image_transforms.enable=true \ hydra.run.dir=tests/outputs/act_pusht/ rm lerobot/configs/policy/created_by_Makefile.yaml + + +test-act-ete-train-accelerate-amp: + python -m accelerate.commands.launch --cpu --mixed-precision=fp16 lerobot/scripts/train.py \ + policy=act \ + policy.dim_model=64 \ + env=aloha \ + wandb.enable=False \ + training.offline_steps=2 \ + training.online_steps=0 \ + eval.n_episodes=1 \ + eval.batch_size=1 \ + device=$(DEVICE) \ + training.save_checkpoint=true \ + training.save_freq=2 \ + policy.n_action_steps=20 \ + policy.chunk_size=20 \ + training.batch_size=2 \ + hydra.run.dir=tests/outputs/act_amp/ \ + training.image_transforms.enable=true + +test-act-ete-eval-accelerate-amp: + python -m accelerate.commands.launch --cpu --mixed-precision=fp16 lerobot/scripts/eval.py \ + -p tests/outputs/act_amp/checkpoints/000002/pretrained_model \ + eval.n_episodes=1 \ + eval.batch_size=1 \ + env.episode_length=8 \ + device=$(DEVICE) diff --git a/README.md b/README.md index 703e64881..d749081b7 100644 --- a/README.md +++ b/README.md @@ -23,15 +23,15 @@

[README banner (HTML block; markup not shown): "Hot new tutorial: Getting started with real-world robots", "Koch v1.1 leader and follower arms", "We just dropped an in-depth tutorial on how to build your own robot!" and "For more info, see our thread on X or our tutorial page." are replaced by "New robot in town: SO-100", "SO-100 leader and follower arms", "We just added a new tutorial on how to build a more affordable robot, at the price of $110 per arm!" and "Follow the link to the full tutorial for SO-100."; "Teach it new skills by showing it a few moves with just a laptop." and "Then watch your homemade robot act autonomously 🤯" are unchanged.]
@@ -55,9 +55,9 @@
[Example table cells updated; captions unchanged: "ACT policy on ALOHA env", "TDMPC policy on SimXArm env", "Diffusion policy on PushT env"]
@@ -66,6 +66,11 @@
+### News + +* **1-11-2024**: we support the `accelerate` library for distributed training and evaluation on multiple GPUs. + + ### Acknowledgment - Thanks to Tony Zaho, Zipeng Fu and colleagues for open sourcing ACT policy, ALOHA environments and datasets. Ours are adapted from [ALOHA](https://tonyzhaozh.github.io/aloha) and [Mobile ALOHA](https://mobile-aloha.github.io). @@ -144,7 +149,7 @@ wandb login ### Visualize datasets -Check out [example 1](./examples/1_load_lerobot_dataset.py) that illustrates how to use our dataset class which automatically download data from the Hugging Face hub. +Check out [example 1](./examples/1_load_lerobot_dataset.py) that illustrates how to use our dataset class which automatically downloads data from the Hugging Face hub. You can also locally visualize episodes from a dataset on the hub by executing our script from the command line: ```bash @@ -153,10 +158,12 @@ python lerobot/scripts/visualize_dataset.py \ --episode-index 0 ``` -or from a dataset in a local folder with the root `DATA_DIR` environment variable (in the following case the dataset will be searched for in `./my_local_data_dir/lerobot/pusht`) +or from a dataset in a local folder with the `root` option and the `--local-files-only` (in the following case the dataset will be searched for in `./my_local_data_dir/lerobot/pusht`) ```bash -DATA_DIR='./my_local_data_dir' python lerobot/scripts/visualize_dataset.py \ +python lerobot/scripts/visualize_dataset.py \ --repo-id lerobot/pusht \ + --root ./my_local_data_dir \ + --local-files-only 1 \ --episode-index 0 ``` @@ -208,12 +215,10 @@ dataset attributes: A `LeRobotDataset` is serialised using several widespread file formats for each of its parts, namely: - hf_dataset stored using Hugging Face datasets library serialization to parquet -- videos are stored in mp4 format to save space or png files -- episode_data_index saved using `safetensor` tensor serialization format -- stats saved using `safetensor` tensor serialization format -- info are saved using JSON +- videos are stored in mp4 format to save space +- metadata are stored in plain json/jsonl files -Dataset can be uploaded/downloaded from the HuggingFace hub seamlessly. To work on a local dataset, you can set the `DATA_DIR` environment variable to your root dataset folder as illustrated in the above section on dataset visualization. +Dataset can be uploaded/downloaded from the HuggingFace hub seamlessly. To work on a local dataset, you can use the `local_files_only` argument and specify its location with the `root` argument if it's not in the default `~/.cache/huggingface/lerobot` location. ### Evaluate a pretrained policy @@ -280,12 +285,36 @@ To use wandb for logging training and evaluation curves, make sure you've run `w wandb.enable=true ``` -A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser. Please also check [here](https://github.com/huggingface/lerobot/blob/main/examples/4_train_policy_with_script.md#typical-logs-and-metrics) for the explaination of some commonly used metrics in logs. +A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser. Please also check [here](https://github.com/huggingface/lerobot/blob/main/examples/4_train_policy_with_script.md#typical-logs-and-metrics) for the explanation of some commonly used metrics in logs. 
![](media/wandb.png) Note: For efficiency, during training every checkpoint is evaluated on a low number of episodes. You may use `eval.n_episodes=500` to evaluate on more episodes than the default. Or, after training, you may want to re-evaluate your best checkpoints on more episodes or change the evaluation settings. See `python lerobot/scripts/eval.py --help` for more instructions. +### Distributed training and evaluation on multiple GPUs/Nodes + +We use the [accelerate](https://huggingface.co/docs/accelerate/basic_tutorials/launch#using-accelerate-launch) library to handle training/evaluating on multiple GPUs/nodes. + +To perform distributed training you should use the `python -m accelerate.commands.launch` command. Here’s an example of launching a training script across 2 GPUs: + +```bash +accelerate launch --num_processes=2 lerobot/scripts/train.py \ + policy=act \ + env=aloha \ + env.task=AlohaTransferCube-v0 \ + dataset_repo_id=lerobot/aloha_sim_transfer_cube_human +``` +Check out [example 12](./examples/12_train_policy_accelerate.py). + +(Note: Make sure `accelerate` is installed; otherwise run `pip install accelerate`.) + +And to evaluate a policy, you can use the following: +```bash +accelerate launch --num_processes=1 --mixed_precision=fp16 lerobot/scripts/eval.py -p lerobot/diffusion_pusht +``` + +Note that to reproduce the same results across different GPU configurations, you should take into account several hyperparameters (explained in more detail [here](https://huggingface.co/docs/accelerate/v1.1.0/en/concept_guides/performance)). In particular, the effective batch size is multiplied by the number of GPUs, so you should either divide the batch size or the number of training steps by the number of GPUs (be careful if you are using an lr scheduler). +
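To make the batch-size note above concrete, here is a minimal sketch of what an `accelerate`-wrapped offline training loop can look like. This is an illustration only, not the contents of `examples/12_train_policy_accelerate.py`: it assumes the standard `accelerate` API (`Accelerator.prepare`, `accelerator.backward`) and reuses the `LeRobotDataset`/`DiffusionPolicy` PushT setup shown in the training notebook added by this PR.

```python
# Minimal sketch of wrapping a LeRobot offline training loop with accelerate.
# Illustrative only; launch with e.g.: accelerate launch --num_processes=2 this_script.py
import torch
from accelerate import Accelerator

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig
from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy

accelerator = Accelerator()  # picks up --num_processes / --mixed_precision from the launcher

# Same PushT setup as the offline training example: the policy is supervised with
# stacked observations and future actions, requested through delta_timestamps.
delta_timestamps = {
    "observation.image": [-0.1, 0.0],
    "observation.state": [-0.1, 0.0],
    "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4],
}
dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps)
policy = DiffusionPolicy(DiffusionConfig(), dataset_stats=dataset.stats)
optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)
dataloader = torch.utils.data.DataLoader(dataset, num_workers=4, batch_size=64, shuffle=True, drop_last=True)

# prepare() moves the policy and optimizer to the right device, wraps the policy for DDP
# when several processes are used, and shards the dataloader so each process sees
# different samples: the effective batch size becomes 64 * accelerator.num_processes.
policy, optimizer, dataloader = accelerator.prepare(policy, optimizer, dataloader)

policy.train()
for step, batch in enumerate(dataloader):
    loss = policy(batch)["loss"]  # the policy returns a dict with a "loss" entry
    accelerator.backward(loss)  # replaces loss.backward(); handles fp16 gradient scaling
    optimizer.step()
    optimizer.zero_grad()
    if accelerator.is_main_process and step % 250 == 0:
        print(f"step: {step} loss: {loss.item():.3f}")
```

Because each process works on its own shard of the data, scaling from 1 to N GPUs multiplies the effective batch size by N, which is why the note above suggests dividing the per-process batch size or the number of training steps.

#### Reproduce state-of-the-art (SOTA)
We have organized our configuration files (found under [`lerobot/configs`](./lerobot/configs)) such that they reproduce SOTA results from a given model variant in their respective original works.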
Simply running: diff --git a/Tests.ipynb b/Tests.ipynb new file mode 100644 index 000000000..15ab421e0 --- /dev/null +++ b/Tests.ipynb @@ -0,0 +1,796 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "4f42f852-922a-45af-bf82-fbc156f8de76", + "metadata": {}, + "source": [ + "## Imports" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "454ed22e-0629-4608-a826-67b4760ea4d5", + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "from pathlib import Path\n", + "from pprint import pprint\n", + "\n", + "import imageio\n", + "import torch\n", + "\n", + "import lerobot\n", + "from IPython.display import Video" + ] + }, + { + "cell_type": "markdown", + "id": "3d73c1af-cfaf-4a9d-a6bf-33c6e45669fd", + "metadata": {}, + "source": [ + "## Datasets" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8b49acf4-ad4a-4170-b129-a7e2e8f48958", + "metadata": {}, + "outputs": [], + "source": [ + "from lerobot.common.datasets.lerobot_dataset import LeRobotDataset\n", + "\n", + "# print(\"List of available datasets:\")\n", + "# pprint(lerobot.available_datasets)\n", + "\n", + "repo_id = \"lerobot/aloha_sim_insertion_human\"\n", + "# You can easily load a dataset from a Hugging Face repository\n", + "dataset = LeRobotDataset(repo_id)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "f422e316-ff93-4137-b8f9-090cfac3cbc2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LeRobotDataset(\n", + " Repository ID: 'lerobot/aloha_sim_insertion_human',\n", + " Split: 'train',\n", + " Number of Samples: 25000,\n", + " Number of Episodes: 50,\n", + " Type: video (.mp4),\n", + " Recorded Frames per Second: 50,\n", + " Camera Keys: ['observation.images.top'],\n", + " Video Frame Keys: ['observation.images.top'],\n", + " Transformations: None,\n", + " Codebase Version: v1.6,\n", + ")\n", + "Dataset({\n", + " features: ['observation.images.top', 'observation.state', 'action', 'episode_index', 'frame_index', 'timestamp', 'next.done', 'index'],\n", + " num_rows: 25000\n", + "})\n", + "\n", + "average number of frames per episode: 500.000\n", + "frames per second used during data collection: dataset.fps=50\n", + "keys to access images from cameras: dataset.camera_keys=['observation.images.top']\n", + "\n" + ] + } + ], + "source": [ + "# LeRobotDataset is actually a thin wrapper around an underlying Hugging Face dataset\n", + "# (see https://huggingface.co/docs/datasets/index for more information).\n", + "print(dataset)\n", + "print(dataset.hf_dataset)\n", + "\n", + "# And provides additional utilities for robotics and compatibility with Pytorch\n", + "print(f\"\\naverage number of frames per episode: {dataset.num_samples / dataset.num_episodes:.3f}\")\n", + "print(f\"frames per second used during data collection: {dataset.fps=}\")\n", + "print(f\"keys to access images from cameras: {dataset.camera_keys=}\\n\")\n", + "\n", + "# Access frame indexes associated to first episode\n", + "episode_index = 0\n", + "from_idx = dataset.episode_data_index[\"from\"][episode_index].item()\n", + "to_idx = dataset.episode_data_index[\"to\"][episode_index].item()\n", + "\n", + "# LeRobot datasets actually subclass PyTorch datasets so you can do everything you know and love from working\n", + "# with the latter, like iterating through the dataset. 
Here we grab all the image frames.\n", + "frames = [dataset[idx][\"observation.images.top\"] for idx in range(from_idx, to_idx)]\n", + "\n", + "# Video frames are now float32 in range [0,1] channel first (c,h,w) to follow pytorch convention. To visualize\n", + "# them, we convert to uint8 in range [0,255]\n", + "frames = [(frame * 255).type(torch.uint8) for frame in frames]\n", + "# and to channel last (h,w,c).\n", + "frames = [frame.permute((1, 2, 0)).numpy() for frame in frames]" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "155e0586-47ea-4e13-8bb3-36b0ac54c50d", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from IPython.display import Video\n", + "# Finally, we save the frames to a mp4 video for visualization.\n", + "dir_path = \"outputs/examples/1_load_lerobot_dataset\"\n", + "Path(dir_path).mkdir(parents=True, exist_ok=True)\n", + "video_path = dir_path + \"/episode_0.mp4\"\n", + "imageio.mimsave(video_path, frames, fps=dataset.fps)\n", + "Video(video_path)" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "5d75427d-1fc7-408b-937a-a1c998878537", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Fetching 56 files: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 1539.36it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "dataset[0]['observation.images.top'].shape=torch.Size([4, 3, 480, 640])\n", + "dataset[0]['observation.state'].shape=torch.Size([8, 14])\n", + "dataset[0]['action'].shape=torch.Size([64, 14])\n", + "\n", + "batch['observation.images.top'].shape=torch.Size([32, 4, 3, 480, 640])\n", + "batch['observation.state'].shape=torch.Size([32, 8, 14])\n", + "batch['action'].shape=torch.Size([32, 64, 14])\n" + ] + } + ], + "source": [ + "# For many machine learning applications we need to load the history of past observations or trajectories of\n", + "# future actions. Our datasets can load previous and future frames for each key/modality, using timestamps\n", + "# differences with the current loaded frame. For instance:\n", + "delta_timestamps = {\n", + " # loads 4 images: 1 second before current frame, 500 ms before, 200 ms before, and current frame\n", + " \"observation.images.top\": [-1, -0.5, -0.20, 0],\n", + " # loads 8 state vectors: 1.5 seconds before, 1 second before, ... 20 ms, 10 ms, and current frame\n", + " \"observation.state\": [-1.5, -1, -0.5, -0.20, -0.10, -0.02, -0.01, 0],\n", + " # loads 64 action vectors: current frame, 1 frame in the future, 2 frames, ... 
63 frames in the future\n", + " \"action\": [t / dataset.fps for t in range(64)],\n", + "}\n", + "dataset = LeRobotDataset(repo_id, delta_timestamps=delta_timestamps)\n", + "print(f\"\\n{dataset[0]['observation.images.top'].shape=}\") # (4,c,h,w)\n", + "print(f\"{dataset[0]['observation.state'].shape=}\") # (8,c)\n", + "print(f\"{dataset[0]['action'].shape=}\\n\") # (64,c)\n", + "\n", + "# Finally, our datasets are fully compatible with PyTorch dataloaders and samplers because they are just\n", + "# PyTorch datasets.\n", + "dataloader = torch.utils.data.DataLoader(\n", + " dataset,\n", + " num_workers=0,\n", + " batch_size=32,\n", + " shuffle=True,\n", + ")\n", + "for batch in dataloader:\n", + " print(f\"{batch['observation.images.top'].shape=}\") # (32,4,c,h,w)\n", + " print(f\"{batch['observation.state'].shape=}\") # (32,8,c)\n", + " print(f\"{batch['action'].shape=}\") # (32,64,c)\n", + " break" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "699cebdb-9edb-45a0-89c8-06085e09f7ca", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([ 0.0281, -0.5264, 0.8721, -0.0157, 0.4200, -0.0215, 0.1536, -0.0547,\n", + " -0.8091, 0.9081, 0.0494, 0.3163, 0.1224, -0.0032])" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "batch['observation.state'][0][0]" + ] + }, + { + "cell_type": "markdown", + "id": "aa7236e1-53ea-4f6f-96cf-e0f344282c6e", + "metadata": {}, + "source": [ + "## Training" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "7f4eda23-6f01-4e2a-bf97-3a87d8c2f761", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Generating train split: 25650 examples [00:00, 381978.62 examples/s]\n", + "Fetching 212 files: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████| 212/212 [00:07<00:00, 28.90it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "step: 0 loss: 1.193\n", + "step: 250 loss: 0.067\n", + "step: 500 loss: 0.054\n", + "step: 750 loss: 0.056\n", + "step: 1000 loss: 0.049\n", + "step: 1250 loss: 0.057\n", + "step: 1500 loss: 0.044\n", + "step: 1750 loss: 0.048\n", + "step: 2000 loss: 0.050\n", + "step: 2250 loss: 0.068\n", + "step: 2500 loss: 0.045\n", + "step: 2750 loss: 0.025\n", + "step: 3000 loss: 0.045\n", + "step: 3250 loss: 0.025\n", + "step: 3500 loss: 0.040\n", + "step: 3750 loss: 0.041\n", + "step: 4000 loss: 0.052\n", + "step: 4250 loss: 0.034\n", + "step: 4500 loss: 0.028\n", + "step: 4750 loss: 0.040\n" + ] + } + ], + "source": [ + "from pathlib import Path\n", + "\n", + "import torch\n", + "\n", + "from lerobot.common.datasets.lerobot_dataset import LeRobotDataset\n", + "from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig\n", + "from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy\n", + "\n", + "# Create a directory to store the training checkpoint.\n", + "output_directory = Path(\"outputs/train/example_pusht_diffusion\")\n", + "output_directory.mkdir(parents=True, exist_ok=True)\n", + "\n", + "# Number of offline training steps (we'll only do offline training for this example.)\n", + "# Adjust as you prefer. 
5000 steps are needed to get something worth evaluating.\n", + "training_steps = 5000\n", + "device = torch.device(\"cuda\")\n", + "log_freq = 250\n", + "\n", + "# Set up the dataset.\n", + "delta_timestamps = {\n", + " # Load the previous image and state at -0.1 seconds before current frame,\n", + " # then load current image and state corresponding to 0.0 second.\n", + " \"observation.image\": [-0.1, 0.0],\n", + " \"observation.state\": [-0.1, 0.0],\n", + " # Load the previous action (-0.1), the next action to be executed (0.0),\n", + " # and 14 future actions with a 0.1 seconds spacing. All these actions will be\n", + " # used to supervise the policy.\n", + " \"action\": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4],\n", + "}\n", + "dataset = LeRobotDataset(\"lerobot/pusht\", delta_timestamps=delta_timestamps)\n", + "\n", + "# Set up the the policy.\n", + "# Policies are initialized with a configuration class, in this case `DiffusionConfig`.\n", + "# For this example, no arguments need to be passed because the defaults are set up for PushT.\n", + "# If you're doing something different, you will likely need to change at least some of the defaults.\n", + "cfg = DiffusionConfig()\n", + "policy = DiffusionPolicy(cfg, dataset_stats=dataset.stats)\n", + "policy.train()\n", + "policy.to(device)\n", + "\n", + "optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)\n", + "\n", + "# Create dataloader for offline training.\n", + "dataloader = torch.utils.data.DataLoader(\n", + " dataset,\n", + " num_workers=4,\n", + " batch_size=64,\n", + " shuffle=True,\n", + " pin_memory=device != torch.device(\"cpu\"),\n", + " drop_last=True,\n", + ")\n", + "\n", + "# Run training loop.\n", + "step = 0\n", + "done = False\n", + "while not done:\n", + " for batch in dataloader:\n", + " batch = {k: v.to(device, non_blocking=True) for k, v in batch.items()}\n", + " output_dict = policy.forward(batch)\n", + " loss = output_dict[\"loss\"]\n", + " loss.backward()\n", + " optimizer.step()\n", + " optimizer.zero_grad()\n", + "\n", + " if step % log_freq == 0:\n", + " print(f\"step: {step} loss: {loss.item():.3f}\")\n", + " step += 1\n", + " if step >= training_steps:\n", + " done = True\n", + " break\n", + "\n", + "# Save a policy checkpoint.\n", + "policy.save_pretrained(output_directory)" + ] + }, + { + "cell_type": "markdown", + "id": "b9ee042e-43de-489c-9ce5-497a04a48653", + "metadata": {}, + "source": [ + "## Evaluation" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "771c90ad-a094-42ad-ba81-271fa27c1951", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/data/mshukor/envs/lerobot/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loading weights from local directory\n", + "GPU is available. 
Device set to: cuda\n", + "cuda\n" + ] + } + ], + "source": [ + "from pathlib import Path\n", + "\n", + "import gym_pusht # noqa: F401\n", + "import gymnasium as gym\n", + "import imageio\n", + "import numpy\n", + "import torch\n", + "from huggingface_hub import snapshot_download\n", + "\n", + "from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy\n", + "from lerobot.common.policies.act.modeling_act import ACTPolicy\n", + "\n", + "\n", + "\n", + "# Download the diffusion policy for pusht environment\n", + "# pretrained_policy_path = Path(snapshot_download(\"lerobot/diffusion_pusht\"))\n", + "# OR uncomment the following to evaluate a policy from the local outputs/train folder.\n", + "\n", + "# Create a directory to store the video of the evaluation\n", + "# output_directory = Path(\"outputs/eval/example_pusht_diffusion\")\n", + "# output_directory.mkdir(parents=True, exist_ok=True)\n", + "# pretrained_policy_path = Path(\"outputs/train/example_pusht_diffusion\")\n", + "# policy = DiffusionPolicy.from_pretrained(pretrained_policy_path)\n", + "\n", + "TASK_NAME = \"lerobot_base_distributed_aloha_transfer_cube_1gpus\"\n", + "step = 50000\n", + "# Create a directory to store the video of the evaluation\n", + "output_directory = Path(f\"outputs/eval/{TASK_NAME}\")\n", + "output_directory.mkdir(parents=True, exist_ok=True)\n", + "policy_path = f\"/data/mshukor/logs/lerobot/{TASK_NAME}/checkpoints/{step:06d}/pretrained_model\"\n", + "pretrained_policy_path = Path(policy_path)\n", + "policy = ACTPolicy.from_pretrained(pretrained_policy_path)\n", + "\n", + "\n", + "policy.eval()\n", + "\n", + "# Check if GPU is available\n", + "if torch.cuda.is_available():\n", + " device = torch.device(\"cuda\")\n", + " print(\"GPU is available. Device set to:\", device)\n", + "else:\n", + " device = torch.device(\"cpu\")\n", + " print(f\"GPU is not available. Device set to: {device}. Inference will be slower than on GPU.\")\n", + " # Decrease the number of reverse-diffusion steps (trades off a bit of quality for 10x speed)\n", + " policy.diffusion.num_inference_steps = 10\n", + "\n", + "policy.to(device)\n", + "print(device)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "8c9151b7-2298-421c-b63b-d97371875730", + "metadata": {}, + "outputs": [], + "source": [ + "import gym_aloha \n", + "import gym_xarm \n", + "# Initialize evaluation environment to render two observation types:\n", + "# an image of the scene and state/position of the agent. 
The environment\n", + "# also automatically stops running after 300 interactions/steps.\n", + "# env = gym.make(\n", + "# \"gym_pusht/PushT-v0\",\n", + "# obs_type=\"pixels_agent_pos\",\n", + "# max_episode_steps=300,\n", + "# )\n", + "\n", + "# env = gym.make(\n", + "# \"gym_aloha/AlohaTransferCube-v0\",\n", + "# obs_type=\"pixels_agent_pos\",\n", + "# max_episode_steps=300,\n", + "# )\n", + "\n", + "env = gym.make(\n", + " \"gym_xarm/XarmLift-v0\",\n", + " obs_type=\"pixels_agent_pos\",\n", + " max_episode_steps=300,\n", + ")\n", + "\n", + "# Reset the policy and environmens to prepare for rollout\n", + "policy.reset()\n", + "numpy_observation, info = env.reset(seed=42)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "bdebf0bd-46d5-4d53-ad56-61edabd41ed8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "step=0 reward=0.0 terminated=False\n", + "step=1 reward=0.0 terminated=False\n", + "step=2 reward=0.0 terminated=False\n", + "step=3 reward=0.0 terminated=False\n", + "step=4 reward=0.0 terminated=False\n", + "step=5 reward=0.0 terminated=False\n", + "step=6 reward=0.0 terminated=False\n", + "step=7 reward=0.0 terminated=False\n", + "step=8 reward=0.0 terminated=False\n", + "step=9 reward=0.0 terminated=False\n", + "step=10 reward=0.0 terminated=False\n", + "step=11 reward=0.0 terminated=False\n", + "step=12 reward=0.0 terminated=False\n", + "step=13 reward=0.0 terminated=False\n", + "step=14 reward=0.0 terminated=False\n", + "step=15 reward=0.0 terminated=False\n", + "step=16 reward=0.0 terminated=False\n", + "step=17 reward=0.0 terminated=False\n", + "step=18 reward=0.0 terminated=False\n", + "step=19 reward=0.0 terminated=False\n", + "step=20 reward=0.0 terminated=False\n", + "step=21 reward=0.0 terminated=False\n", + "step=22 reward=0.0 terminated=False\n", + "step=23 reward=0.0 terminated=False\n", + "step=24 reward=0.0 terminated=False\n", + "step=25 reward=0.0 terminated=False\n", + "step=26 reward=0.0 terminated=False\n", + "step=27 reward=0.0 terminated=False\n", + "step=28 reward=0.0 terminated=False\n", + "step=29 reward=0.0 terminated=False\n", + "step=30 reward=0.0 terminated=False\n", + "step=31 reward=0.0 terminated=False\n", + "step=32 reward=0.0 terminated=False\n", + "step=33 reward=0.0 terminated=False\n", + "step=34 reward=0.0 terminated=False\n", + "step=35 reward=0.0060973941747585235 terminated=False\n", + "step=36 reward=0.02622279916580214 terminated=False\n", + "step=37 reward=0.05121513810520591 terminated=False\n", + "step=38 reward=0.07973351723518816 terminated=False\n", + "step=39 reward=0.1055264807998946 terminated=False\n", + "step=40 reward=0.11397255143592261 terminated=False\n", + "step=41 reward=0.1183477905742287 terminated=False\n", + "step=42 reward=0.12188789988102575 terminated=False\n", + "step=43 reward=0.12545805998888346 terminated=False\n", + "step=44 reward=0.1288567941022673 terminated=False\n", + "step=45 reward=0.13151607159645246 terminated=False\n", + "step=46 reward=0.1334215324045937 terminated=False\n", + "step=47 reward=0.13462679837021455 terminated=False\n", + "step=48 reward=0.13541090727032962 terminated=False\n", + "step=49 reward=0.13590309886928334 terminated=False\n", + "step=50 reward=0.13633914976509004 terminated=False\n", + "step=51 reward=0.13675125461247126 terminated=False\n", + "step=52 reward=0.13710415750492036 terminated=False\n", + "step=53 reward=0.1373384382180125 terminated=False\n", + "step=54 reward=0.13744056010263833 
terminated=False\n", + "step=55 reward=0.1373603108492143 terminated=False\n", + "step=56 reward=0.13713720481440303 terminated=False\n", + "step=57 reward=0.13711259085950156 terminated=False\n", + "step=58 reward=0.13711259085950156 terminated=False\n", + "step=59 reward=0.13711259085950156 terminated=False\n", + "step=60 reward=0.13711259085950156 terminated=False\n", + "step=61 reward=0.13711259085950156 terminated=False\n", + "step=62 reward=0.13711259085950156 terminated=False\n", + "step=63 reward=0.13711259085950156 terminated=False\n", + "step=64 reward=0.13711259085950156 terminated=False\n", + "step=65 reward=0.13711259085950156 terminated=False\n", + "step=66 reward=0.13711259085950156 terminated=False\n", + "step=67 reward=0.13711259085950156 terminated=False\n", + "step=68 reward=0.13711259085950156 terminated=False\n", + "step=69 reward=0.13711259085950156 terminated=False\n", + "step=70 reward=0.13711259085950156 terminated=False\n", + "step=71 reward=0.13711259085950156 terminated=False\n", + "step=72 reward=0.13711259085950156 terminated=False\n", + "step=73 reward=0.13711259085950156 terminated=False\n", + "step=74 reward=0.13711259085950156 terminated=False\n", + "step=75 reward=0.13711259085950156 terminated=False\n", + "step=76 reward=0.13711259085950156 terminated=False\n", + "step=77 reward=0.13711259085950156 terminated=False\n", + "step=78 reward=0.13711259085950156 terminated=False\n", + "step=79 reward=0.13711259085950156 terminated=False\n", + "step=80 reward=0.14165259163772553 terminated=False\n", + "step=81 reward=0.16254716720209392 terminated=False\n", + "step=82 reward=0.18038377158048674 terminated=False\n", + "step=83 reward=0.1955349503035339 terminated=False\n", + "step=84 reward=0.209590275273389 terminated=False\n", + "step=85 reward=0.22297886936917977 terminated=False\n", + "step=86 reward=0.2392411144470952 terminated=False\n", + "step=87 reward=0.2580199500579908 terminated=False\n", + "step=88 reward=0.2777329773794388 terminated=False\n", + "step=89 reward=0.29761240390900523 terminated=False\n", + "step=90 reward=0.31716465663195625 terminated=False\n", + "step=91 reward=0.3355213586040695 terminated=False\n", + "step=92 reward=0.3524998804211362 terminated=False\n", + "step=93 reward=0.36810918114197516 terminated=False\n", + "step=94 reward=0.38014989945536126 terminated=False\n", + "step=95 reward=0.3905574417126761 terminated=False\n", + "step=96 reward=0.4002848213657343 terminated=False\n", + "step=97 reward=0.4170736338625133 terminated=False\n", + "step=98 reward=0.44908708220841653 terminated=False\n", + "step=99 reward=0.49331303205450905 terminated=False\n", + "step=100 reward=0.5554300692787169 terminated=False\n", + "step=101 reward=0.6250862544059551 terminated=False\n", + "step=102 reward=0.6893315351179427 terminated=False\n", + "step=103 reward=0.7439593776734601 terminated=False\n", + "step=104 reward=0.7813831447770964 terminated=False\n", + "step=105 reward=0.8090518882538443 terminated=False\n", + "step=106 reward=0.8280920189882142 terminated=False\n", + "step=107 reward=0.8437796249119314 terminated=False\n", + "step=108 reward=0.8639611941043245 terminated=False\n", + "step=109 reward=0.8906448932017331 terminated=False\n", + "step=110 reward=0.917804351543538 terminated=False\n", + "step=111 reward=0.9421491802064923 terminated=False\n", + "step=112 reward=0.9619641827581312 terminated=False\n", + "step=113 reward=0.9752323060631967 terminated=False\n", + "step=114 reward=0.9769516793273336 terminated=False\n", + 
"step=115 reward=0.9769516793273336 terminated=False\n", + "step=116 reward=0.9769516793273336 terminated=False\n", + "step=117 reward=0.9791212865711658 terminated=False\n", + "step=118 reward=0.9821981164893177 terminated=False\n", + "step=119 reward=0.9825801568345679 terminated=False\n", + "step=120 reward=0.9825801568345679 terminated=False\n", + "step=121 reward=0.9825801568345679 terminated=False\n", + "step=122 reward=0.9825801568345679 terminated=False\n", + "step=123 reward=0.9825801568345679 terminated=False\n", + "step=124 reward=0.9825801568345679 terminated=False\n", + "step=125 reward=0.9825801568345679 terminated=False\n", + "step=126 reward=0.9825801568345679 terminated=False\n", + "step=127 reward=0.9825801568345679 terminated=False\n", + "step=128 reward=0.9825801568345679 terminated=False\n", + "step=129 reward=0.9825801568345679 terminated=False\n", + "step=130 reward=0.9825801568345679 terminated=False\n", + "step=131 reward=0.9825801568345679 terminated=False\n", + "step=132 reward=0.9825801568345679 terminated=False\n", + "step=133 reward=0.9825801568345679 terminated=False\n", + "step=134 reward=0.9825801568345679 terminated=False\n", + "step=135 reward=0.9825801568345679 terminated=False\n", + "step=136 reward=0.9825801568345679 terminated=False\n", + "step=137 reward=0.9825801568345679 terminated=False\n", + "step=138 reward=0.9825801568345679 terminated=False\n", + "step=139 reward=0.9825801568345679 terminated=False\n", + "step=140 reward=0.9864498413092376 terminated=False\n", + "step=141 reward=0.9976040425707386 terminated=False\n", + "step=142 reward=1.0 terminated=True\n", + "Success!\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "IMAGEIO FFMPEG_WRITER WARNING: input image is not divisible by macro_block_size=16, resizing from (680, 680) to (688, 688) to ensure video compatibility with most codecs and players. To prevent resizing, make your input image divisible by the macro_block_size or set the macro_block_size to 1 (risking incompatibility).\n", + "[swscaler @ 0x5a7cec0] Warning: data is not aligned! 
This can lead to a speed loss\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Video of the evaluation is available in 'outputs/eval/example_pusht_diffusion/rollout.mp4'.\n" + ] + }, + { + "ename": "NameError", + "evalue": "name 'Video' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[4], line 69\u001b[0m\n\u001b[1;32m 66\u001b[0m imageio\u001b[38;5;241m.\u001b[39mmimsave(\u001b[38;5;28mstr\u001b[39m(video_path), numpy\u001b[38;5;241m.\u001b[39mstack(frames), fps\u001b[38;5;241m=\u001b[39mfps)\n\u001b[1;32m 68\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mVideo of the evaluation is available in \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mvideo_path\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m---> 69\u001b[0m \u001b[43mVideo\u001b[49m(video_path)\n", + "\u001b[0;31mNameError\u001b[0m: name 'Video' is not defined" + ] + } + ], + "source": [ + "# Prepare to collect every rewards and all the frames of the episode,\n", + "# from initial state to final state.\n", + "rewards = []\n", + "frames = []\n", + "\n", + "# Render frame of the initial state\n", + "frames.append(env.render())\n", + "\n", + "step = 0\n", + "done = False\n", + "while not done:\n", + " # Prepare observation for the policy running in Pytorch\n", + " state = torch.from_numpy(numpy_observation[\"agent_pos\"])\n", + " image = torch.from_numpy(numpy_observation[\"pixels\"])\n", + "\n", + " # Convert to float32 with image from channel first in [0,255]\n", + " # to channel last in [0,1]\n", + " state = state.to(torch.float32)\n", + " image = image.to(torch.float32) / 255\n", + " image = image.permute(2, 0, 1)\n", + "\n", + " # Send data tensors from CPU to GPU\n", + " state = state.to(device, non_blocking=True)\n", + " image = image.to(device, non_blocking=True)\n", + "\n", + " # Add extra (empty) batch dimension, required to forward the policy\n", + " state = state.unsqueeze(0)\n", + " image = image.unsqueeze(0)\n", + "\n", + " # Create the policy input dictionary\n", + " observation = {\n", + " \"observation.state\": state,\n", + " \"observation.image\": image,\n", + " }\n", + "\n", + " # Predict the next action with respect to the current observation\n", + " with torch.inference_mode():\n", + " action = policy.select_action(observation)\n", + "\n", + " # Prepare the action for the environment\n", + " numpy_action = action.squeeze(0).to(\"cpu\").numpy()\n", + "\n", + " # Step through the environment and receive a new observation\n", + " numpy_observation, reward, terminated, truncated, info = env.step(numpy_action)\n", + " print(f\"{step=} {reward=} {terminated=}\")\n", + "\n", + " # Keep track of all the rewards and frames\n", + " rewards.append(reward)\n", + " frames.append(env.render())\n", + "\n", + " # The rollout is considered done when the success state is reach (i.e. terminated is True),\n", + " # or the maximum number of iterations is reached (i.e. truncated is True)\n", + " done = terminated | truncated | done\n", + " step += 1\n", + "\n", + "if terminated:\n", + " print(\"Success!\")\n", + "else:\n", + " print(\"Failure!\")\n", + "\n", + "# Get the speed of environment (i.e. 
its number of frames per second).\n", + "fps = env.metadata[\"render_fps\"]\n", + "\n", + "# Encode all frames into a mp4 video.\n", + "video_path = output_directory / \"rollout.mp4\"\n", + "imageio.mimsave(str(video_path), numpy.stack(frames), fps=fps)\n", + "\n", + "print(f\"Video of the evaluation is available in '{video_path}'.\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "5dd62065-1ac1-4cff-9fd6-236d490b3808", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "Video(video_path)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "f687655a-a892-4864-9bb6-8e396d8dd72c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "torch.Size([1, 3, 96, 96])" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "observation['observation.image'].shape" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "lerobot", + "language": "python", + "name": "lerobot" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.15" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/benchmarks/video/run_video_benchmark.py b/benchmarks/video/run_video_benchmark.py index 46806c075..e90664872 100644 --- a/benchmarks/video/run_video_benchmark.py +++ b/benchmarks/video/run_video_benchmark.py @@ -266,7 +266,7 @@ def benchmark_encoding_decoding( ) ep_num_images = dataset.episode_data_index["to"][0].item() - width, height = tuple(dataset[0][dataset.camera_keys[0]].shape[-2:]) + width, height = tuple(dataset[0][dataset.meta.camera_keys[0]].shape[-2:]) num_pixels = width * height video_size_bytes = video_path.stat().st_size images_size_bytes = get_directory_size(imgs_dir) diff --git a/docker/lerobot-cpu/Dockerfile b/docker/lerobot-cpu/Dockerfile index 34f5361a8..707a6c65b 100644 --- a/docker/lerobot-cpu/Dockerfile +++ b/docker/lerobot-cpu/Dockerfile @@ -22,7 +22,7 @@ RUN echo "source /opt/venv/bin/activate" >> /root/.bashrc COPY . /lerobot WORKDIR /lerobot RUN pip install --upgrade --no-cache-dir pip -RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]" \ +RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel, accelerate]" \ --extra-index-url https://download.pytorch.org/whl/cpu # Set EGL as the rendering backend for MuJoCo diff --git a/docker/lerobot-gpu/Dockerfile b/docker/lerobot-gpu/Dockerfile index 92640cf4b..6b9429b77 100644 --- a/docker/lerobot-gpu/Dockerfile +++ b/docker/lerobot-gpu/Dockerfile @@ -24,7 +24,7 @@ RUN echo "source /opt/venv/bin/activate" >> /root/.bashrc COPY . /lerobot WORKDIR /lerobot RUN pip install --upgrade --no-cache-dir pip -RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]" +RUN pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel, accelerate]" # Set EGL as the rendering backend for MuJoCo ENV MUJOCO_GL="egl" diff --git a/examples/10_use_so100.md b/examples/10_use_so100.md new file mode 100644 index 000000000..70e4ed8ba --- /dev/null +++ b/examples/10_use_so100.md @@ -0,0 +1,275 @@ +This tutorial explains how to use [SO-100](https://github.com/TheRobotStudio/SO-ARM100) with LeRobot. 
+ +## Source the parts + +Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials, with link to source the parts, as well as the instructions to 3D print the parts, and advices if it's your first time printing or if you don't own a 3D printer already. + +**Important**: Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly. + +## Install LeRobot + +On your computer: + +1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install): +```bash +mkdir -p ~/miniconda3 +wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh +bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3 +rm ~/miniconda3/miniconda.sh +~/miniconda3/bin/conda init bash +``` + +2. Restart shell or `source ~/.bashrc` + +3. Create and activate a fresh conda environment for lerobot +```bash +conda create -y -n lerobot python=3.10 && conda activate lerobot +``` + +4. Clone LeRobot: +```bash +git clone https://github.com/huggingface/lerobot.git ~/lerobot +``` + +5. Install LeRobot with dependencies for the feetech motors: +```bash +cd ~/lerobot && pip install -e ".[feetech]" +``` + +For Linux only (not Mac), install extra dependencies for recording datasets: +```bash +conda install -y -c conda-forge ffmpeg +pip uninstall -y opencv-python +conda install -y -c conda-forge "opencv>=4.10.0" +``` + +## Configure the motors + +Follow steps 1 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I) which illustrates the use of our scripts below. + +**Find USB ports associated to your arms** +To find the correct ports for each arm, run the utility script twice: +```bash +python lerobot/scripts/find_motors_bus_port.py +``` + +Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux): +``` +Finding all available ports for the MotorBus. +['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751'] +Remove the usb cable from your DynamixelMotorsBus and press Enter when done. + +[...Disconnect leader arm and press Enter...] + +The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751 +Reconnect the usb cable. +``` + +Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux): +``` +Finding all available ports for the MotorBus. +['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751'] +Remove the usb cable from your DynamixelMotorsBus and press Enter when done. + +[...Disconnect follower arm and press Enter...] + +The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081 +Reconnect the usb cable. +``` + +Troubleshooting: On Linux, you might need to give access to the USB ports by running: +```bash +sudo chmod 666 /dev/ttyACM0 +sudo chmod 666 /dev/ttyACM1 +``` + +**Configure your motors** +Plug your first motor and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate: +```bash +python lerobot/scripts/configure_motor.py \ + --port /dev/tty.usbmodem58760432961 \ + --brand feetech \ + --model sts3215 \ + --baudrate 1000000 \ + --ID 1 +``` + +Note: These motors are currently limitated. They can take values between 0 and 4096 only, which corresponds to a full turn. They can't turn more than that. 
2048 is at the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) and reach the maximum range, or take +2048 steps (180 degrees clockwise) and reach the maximum range. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift up to ± 2048 steps (± 180 degrees). + +Then unplug your motor and plug the second motor and set its ID to 2. +```bash +python lerobot/scripts/configure_motor.py \ + --port /dev/tty.usbmodem58760432961 \ + --brand feetech \ + --model sts3215 \ + --baudrate 1000000 \ + --ID 2 +``` + +Redo the process for all your motors until ID 6. Do the same for the 6 motors of the leader arm. + +**Remove the gears of the 6 leader motors** +Follow step 2 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I). You need to remove the gear for the motors of the leader arm. As a result, you will only use the position encoding of the motor and reduce friction to more easily operate the leader arm. + +**Add motor horn to the motors** +Follow step 3 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I). For SO-100, you need to align the holes on the motor horn to the motor spline to be approximately 1:30, 4:30, 7:30 and 10:30. +Try to avoid rotating the motor while doing so to keep position 2048 set during configuration. It is especially tricky for the leader motors as it is more sensible without the gears, but it's ok if it's a bit rotated. + +## Assemble the arms + +Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I). The first arm should take a bit more than 1 hour to assemble, but once you get use to it, you can do it under 1 hour for the second arm. + +## Calibrate + +Next, you'll need to calibrate your SO-100 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one SO-100 robot to work on another. + +**Manual calibration of follower arm** +/!\ Contrarily to step 6 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I) which illustrates the auto calibration, we will actually do manual calibration of follower for now. + +You will need to move the follower arm to these positions sequentially: + +| 1. Zero position | 2. Rotated position | 3. Rest position | +|---|---|---| +| SO-100 follower arm zero position | SO-100 follower arm rotated position | SO-100 follower arm rest position | + +Make sure both arms are connected and run this script to launch manual calibration: +```bash +python lerobot/scripts/control_robot.py calibrate \ + --robot-path lerobot/configs/robot/so100.yaml \ + --robot-overrides '~cameras' --arms main_follower +``` + +**Manual calibration of leader arm** +Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=FioA2oeFZ5I) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially: + +| 1. Zero position | 2. Rotated position | 3. Rest position | +|---|---|---| +| SO-100 leader arm zero position | SO-100 leader arm rotated position | SO-100 leader arm rest position | + +Run this script to launch manual calibration: +```bash +python lerobot/scripts/control_robot.py calibrate \ + --robot-path lerobot/configs/robot/so100.yaml \ + --robot-overrides '~cameras' --arms main_leader +``` + +## Teleoperate + +**Simple teleop** +Then you are ready to teleoperate your robot! 
Run this simple script (it won't connect and display the cameras): +```bash +python lerobot/scripts/control_robot.py teleoperate \ + --robot-path lerobot/configs/robot/so100.yaml \ + --robot-overrides '~cameras' \ + --display-cameras 0 +``` + + +**Teleop with displaying cameras** +Follow [this guide to setup your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following code. This is useful to prepare your setup before recording your first dataset. +```bash +python lerobot/scripts/control_robot.py teleoperate \ + --robot-path lerobot/configs/robot/so100.yaml +``` + +## Record a dataset + +Once you're familiar with teleoperation, you can record your first dataset with SO-100. + +If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens): +```bash +huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential +``` + +Store your Hugging Face repository name in a variable to run these commands: +```bash +HF_USER=$(huggingface-cli whoami | head -n 1) +echo $HF_USER +``` + +Record 2 episodes and upload your dataset to the hub: +```bash +python lerobot/scripts/control_robot.py record \ + --robot-path lerobot/configs/robot/so100.yaml \ + --fps 30 \ + --repo-id ${HF_USER}/so100_test \ + --tags so100 tutorial \ + --warmup-time-s 5 \ + --episode-time-s 40 \ + --reset-time-s 10 \ + --num-episodes 2 \ + --push-to-hub 1 +``` + +## Visualize a dataset + +If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by: +```bash +echo ${HF_USER}/so100_test +``` + +If you didn't upload with `--push-to-hub 0`, you can also visualize it locally with: +```bash +python lerobot/scripts/visualize_dataset_html.py \ + --repo-id ${HF_USER}/so100_test +``` + +## Replay an episode + +Now try to replay the first episode on your robot: +```bash +python lerobot/scripts/control_robot.py replay \ + --robot-path lerobot/configs/robot/so100.yaml \ + --fps 30 \ + --repo-id ${HF_USER}/so100_test \ + --episode 0 +``` + +## Train a policy + +To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command: +```bash +python lerobot/scripts/train.py \ + dataset_repo_id=${HF_USER}/so100_test \ + policy=act_so100_real \ + env=so100_real \ + hydra.run.dir=outputs/train/act_so100_test \ + hydra.job.name=act_so100_test \ + device=cuda \ + wandb.enable=true +``` + +Let's explain it: +1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/so100_test`. +2. We provided the policy with `policy=act_so100_real`. This loads configurations from [`lerobot/configs/policy/act_so100_real.yaml`](../lerobot/configs/policy/act_so100_real.yaml). Importantly, this policy uses 2 cameras as input `laptop`, `phone`. +3. We provided an environment as argument with `env=so100_real`. This loads configurations from [`lerobot/configs/env/so100_real.yaml`](../lerobot/configs/env/so100_real.yaml). +4. 
We provided `device=cuda` since we are training on a Nvidia GPU, but you can also use `device=mps` if you are using a Mac with Apple silicon, or `device=cpu` otherwise. +5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`. + +Training should take several hours. You will find checkpoints in `outputs/train/act_so100_test/checkpoints`. + +## Evaluate your policy + +You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes: +```bash +python lerobot/scripts/control_robot.py record \ + --robot-path lerobot/configs/robot/so100.yaml \ + --fps 30 \ + --repo-id ${HF_USER}/eval_act_so100_test \ + --tags so100 tutorial eval \ + --warmup-time-s 5 \ + --episode-time-s 40 \ + --reset-time-s 10 \ + --num-episodes 10 \ + -p outputs/train/act_so100_test/checkpoints/last/pretrained_model +``` + +As you can see, it's almost the same command as previously used to record your training dataset. Two things changed: +1. There is an additional `-p` argument which indicates the path to your policy checkpoint with (e.g. `-p outputs/train/eval_so100_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_so100_test`). +2. The name of dataset begins by `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_so100_test`). + +## More + +Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth tutorial on controlling real robots with LeRobot. + +If you have any question or need help, please reach out on Discord in the channel [`#so100-arm`](https://discord.com/channels/1216765309076115607/1237741463832363039). diff --git a/examples/11_use_moss.md b/examples/11_use_moss.md new file mode 100644 index 000000000..55d6fcaf9 --- /dev/null +++ b/examples/11_use_moss.md @@ -0,0 +1,275 @@ +This tutorial explains how to use [Moss v1](https://github.com/jess-moss/moss-robot-arms) with LeRobot. + +## Source the parts + +Follow this [README](https://github.com/jess-moss/moss-robot-arms). It contains the bill of materials, with link to source the parts, as well as the instructions to 3D print the parts, and advices if it's your first time printing or if you don't own a 3D printer already. + +**Important**: Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly. + +## Install LeRobot + +On your computer: + +1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install): +```bash +mkdir -p ~/miniconda3 +wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh +bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3 +rm ~/miniconda3/miniconda.sh +~/miniconda3/bin/conda init bash +``` + +2. Restart shell or `source ~/.bashrc` + +3. Create and activate a fresh conda environment for lerobot +```bash +conda create -y -n lerobot python=3.10 && conda activate lerobot +``` + +4. Clone LeRobot: +```bash +git clone https://github.com/huggingface/lerobot.git ~/lerobot +``` + +5. 
+```bash
+cd ~/lerobot && pip install -e ".[feetech]"
+```
+
+For Linux only (not Mac), install extra dependencies for recording datasets:
+```bash
+conda install -y -c conda-forge ffmpeg
+pip uninstall -y opencv-python
+conda install -y -c conda-forge "opencv>=4.10.0"
+```
+
+## Configure the motors
+
+Follow step 1 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic), which illustrates the use of our scripts below.
+
+**Find USB ports associated with your arms**
+To find the correct ports for each arm, run the utility script twice:
+```bash
+python lerobot/scripts/find_motors_bus_port.py
+```
+
+Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
+```
+Finding all available ports for the MotorBus.
+['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
+Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
+
+[...Disconnect leader arm and press Enter...]
+
+The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
+Reconnect the usb cable.
+```
+
+Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
+```
+Finding all available ports for the MotorBus.
+['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
+Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
+
+[...Disconnect follower arm and press Enter...]
+
+The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
+Reconnect the usb cable.
+```
+
+Troubleshooting: On Linux, you might need to give access to the USB ports by running:
+```bash
+sudo chmod 666 /dev/ttyACM0
+sudo chmod 666 /dev/ttyACM1
+```
+
+**Configure your motors**
+Plug your first motor and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate:
+```bash
+python lerobot/scripts/configure_motor.py \
+  --port /dev/tty.usbmodem58760432961 \
+  --brand feetech \
+  --model sts3215 \
+  --baudrate 1000000 \
+  --ID 1
+```
+
+Note: These motors are currently limited. They can only take values between 0 and 4096, which corresponds to a full turn. They can't turn more than that. 2048 is the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) or +2048 steps (180 degrees clockwise) before reaching the end of the range. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift of up to ± 2048 steps (± 180 degrees).
+
+Then unplug your motor, plug in the second motor, and set its ID to 2.
+```bash
+python lerobot/scripts/configure_motor.py \
+  --port /dev/tty.usbmodem58760432961 \
+  --brand feetech \
+  --model sts3215 \
+  --baudrate 1000000 \
+  --ID 2
+```
+
+Redo the process for all your motors until ID 6. Do the same for the 6 motors of the leader arm.
+
+**Remove the gears of the 6 leader motors**
+Follow step 2 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). You need to remove the gears from the motors of the leader arm. As a result, you will only use the position encoding of the motor and reduce friction to more easily operate the leader arm.
+
+**Add motor horn to the motors**
+Follow step 3 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). For Moss v1, you need to align the holes on the motor horn with the motor spline at approximately the 3, 6, 9 and 12 o'clock positions.
+Try to avoid rotating the motor while doing so, to keep the position 2048 set during configuration. It is especially tricky for the leader motors, as they are more sensitive without the gears, but it's OK if it's a bit rotated.
+
+## Assemble the arms
+
+Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can assemble the second arm in under 1 hour.
+
+## Calibrate
+
+Next, you'll need to calibrate your Moss v1 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one Moss v1 robot to work on another.
+
+**Manual calibration of follower arm**
+/!\ Contrary to step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic), which illustrates auto calibration, we will actually do manual calibration of the follower for now.
+
+You will need to move the follower arm to these positions sequentially:
+
+| 1. Zero position | 2. Rotated position | 3. Rest position |
+|---|---|---|
+| Moss v1 follower arm zero position | Moss v1 follower arm rotated position | Moss v1 follower arm rest position |
+
+Make sure both arms are connected and run this script to launch manual calibration:
+```bash
+python lerobot/scripts/control_robot.py calibrate \
+  --robot-path lerobot/configs/robot/moss.yaml \
+  --robot-overrides '~cameras' --arms main_follower
+```
+
+**Manual calibration of leader arm**
+Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic), which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially:
+
+| 1. Zero position | 2. Rotated position | 3. Rest position |
+|---|---|---|
+| Moss v1 leader arm zero position | Moss v1 leader arm rotated position | Moss v1 leader arm rest position |
+
+Run this script to launch manual calibration:
+```bash
+python lerobot/scripts/control_robot.py calibrate \
+  --robot-path lerobot/configs/robot/moss.yaml \
+  --robot-overrides '~cameras' --arms main_leader
+```
+
+## Teleoperate
+
+**Simple teleop**
+Then you are ready to teleoperate your robot! Run this simple script (it won't connect to and display the cameras):
+```bash
+python lerobot/scripts/control_robot.py teleoperate \
+  --robot-path lerobot/configs/robot/moss.yaml \
+  --robot-overrides '~cameras' \
+  --display-cameras 0
+```
+
+
+**Teleop with displaying cameras**
+Follow [this guide to set up your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following code. This is useful to prepare your setup before recording your first dataset.
+```bash
+python lerobot/scripts/control_robot.py teleoperate \
+  --robot-path lerobot/configs/robot/moss.yaml
+```
+
+## Record a dataset
+
+Once you're familiar with teleoperation, you can record your first dataset with Moss v1.
+
+If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
+```bash
+huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
+```
+
+Store your Hugging Face repository name in a variable to run these commands:
+```bash
+HF_USER=$(huggingface-cli whoami | head -n 1)
+echo $HF_USER
+```
+
+Record 2 episodes and upload your dataset to the hub:
+```bash
+python lerobot/scripts/control_robot.py record \
+  --robot-path lerobot/configs/robot/moss.yaml \
+  --fps 30 \
+  --repo-id ${HF_USER}/moss_test \
+  --tags moss tutorial \
+  --warmup-time-s 5 \
+  --episode-time-s 40 \
+  --reset-time-s 10 \
+  --num-episodes 2 \
+  --push-to-hub 1
+```
+
+## Visualize a dataset
+
+If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id given by:
+```bash
+echo ${HF_USER}/moss_test
+```
+
+If you didn't upload to the hub (i.e. you used `--push-to-hub 0`), you can also visualize it locally with:
+```bash
+python lerobot/scripts/visualize_dataset_html.py \
+  --repo-id ${HF_USER}/moss_test
+```
+
+## Replay an episode
+
+Now try to replay the first episode on your robot:
+```bash
+python lerobot/scripts/control_robot.py replay \
+  --robot-path lerobot/configs/robot/moss.yaml \
+  --fps 30 \
+  --repo-id ${HF_USER}/moss_test \
+  --episode 0
+```
+
+## Train a policy
+
+To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
+```bash
+python lerobot/scripts/train.py \
+  dataset_repo_id=${HF_USER}/moss_test \
+  policy=act_moss_real \
+  env=moss_real \
+  hydra.run.dir=outputs/train/act_moss_test \
+  hydra.job.name=act_moss_test \
+  device=cuda \
+  wandb.enable=true
+```
+
+Let's explain it:
+1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/moss_test`.
+2. We provided the policy with `policy=act_moss_real`. This loads configurations from [`lerobot/configs/policy/act_moss_real.yaml`](../lerobot/configs/policy/act_moss_real.yaml). Importantly, this policy uses 2 cameras as input: `laptop` and `phone`.
+3. We provided an environment as argument with `env=moss_real`. This loads configurations from [`lerobot/configs/env/moss_real.yaml`](../lerobot/configs/env/moss_real.yaml).
+4. We provided `device=cuda` since we are training on an Nvidia GPU, but you can also use `device=mps` if you are using a Mac with Apple silicon, or `device=cpu` otherwise.
+5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`.
+
+Training should take several hours. You will find checkpoints in `outputs/train/act_moss_test/checkpoints`.
+
+## Evaluate your policy
+
+You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
+```bash
+python lerobot/scripts/control_robot.py record \
+  --robot-path lerobot/configs/robot/moss.yaml \
+  --fps 30 \
+  --repo-id ${HF_USER}/eval_act_moss_test \
+  --tags moss tutorial eval \
+  --warmup-time-s 5 \
+  --episode-time-s 40 \
+  --reset-time-s 10 \
+  --num-episodes 10 \
+  -p outputs/train/act_moss_test/checkpoints/last/pretrained_model
+```
+
+As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
+1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/eval_moss_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_moss_test`).
+2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_moss_test`).
+
+## More
+
+Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth walkthrough on controlling real robots with LeRobot.
+
+If you have any questions or need help, please reach out on Discord in the channel [`#moss-arm`](https://discord.com/channels/1216765309076115607/1275374638985252925).
diff --git a/examples/12_train_policy_accelerate.py b/examples/12_train_policy_accelerate.py
new file mode 100644
index 000000000..bc6b5b85d
--- /dev/null
+++ b/examples/12_train_policy_accelerate.py
@@ -0,0 +1,106 @@
+"""
+This script demonstrates how to train the ACT policy with distributed training on the Aloha environment
+on the Transfer cube task, using HuggingFace accelerate.
+
+Apart from the main installation procedure, please also make sure you have installed accelerate before running this script: `pip install accelerate`.
+
+To launch it, you will have to use the accelerate launcher, for example:
+`python -m accelerate.commands.launch examples/12_train_policy_accelerate.py`. This will launch the script with default distributed parameters.
+To launch on two GPUs, you can use `python -m accelerate.commands.launch --num_processes 2 examples/12_train_policy_accelerate.py`.
+
+Find detailed information in the documentation: `https://github.com/huggingface/accelerate`.
+"""
+
+from pathlib import Path
+
+import torch
+from accelerate import Accelerator
+
+from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.common.policies.act.configuration_act import ACTConfig
+from lerobot.common.policies.act.modeling_act import ACTPolicy
+
+# Create a directory to store the training checkpoint.
+output_directory = Path("outputs/train/example_aloha_act_distributed")
+output_directory.mkdir(parents=True, exist_ok=True)
+
+# Number of overall offline training steps
+training_steps = 5000
+log_freq = 250
+
+# The chunk size is the number of actions that the policy will predict.
+chunk_size = 100
+
+delta_timestamps = {
+    "action":
+    # Load a chunk of 100 actions starting at the current frame, because we train the policy
+    # to predict the next 100 actions. Two frames differ by 1/50 seconds, which corresponds to the FPS of this dataset.
+    [i / 50 for i in range(chunk_size)]
+}
+
+
+def train():
+    # We prepare for distributed training using the Accelerator.
+    accelerator = Accelerator()
+    device = accelerator.device
+
+    # Set up the dataset.
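+    # Note: the dataset below is downloaded from the Hugging Face hub (and cached locally) on first use.
+    # With the delta_timestamps defined above, each sample bundles `chunk_size` consecutive actions
+    # starting at the sampled frame, which is the chunk of actions ACT learns to predict.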
+ dataset = LeRobotDataset("lerobot/aloha_sim_transfer_cube_human_image", delta_timestamps=delta_timestamps) + accelerator.print(f"Loaded dataset with {len(dataset)} samples.") + + # The policy is initialized with a configuration class, in this case `ACTConfig`. + cfg = ACTConfig(chunk_size=chunk_size) + policy = ACTPolicy(cfg, dataset_stats=dataset.stats) + policy.train() + num_total_params = sum(p.numel() for p in policy.parameters() if p.requires_grad) + accelerator.print(f"Policy initialized with {num_total_params} parameters.") + + optimizer = torch.optim.Adam(policy.parameters(), lr=1e-5) + + dataloader = torch.utils.data.DataLoader( + dataset, + num_workers=4, + batch_size=8, + shuffle=True, + pin_memory=device != torch.device("cpu"), + drop_last=True, + ) + + # Prepare the policy, optimizer, and dataloader for distributed training. + # This will wrap the policy in a DistributedDataParallel and apply torch.autocast to the forward functions. + policy, optimizer, dataloader = accelerator.prepare(policy, optimizer, dataloader) + + policy.to(device) + + step = 0 + done = False + while not done: + for batch in dataloader: + batch = {k: v.to(device) for k, v in batch.items()} + output_dict = policy.forward(batch) + + loss = output_dict["loss"].mean() + accelerator.backward(loss) + + optimizer.step() + optimizer.zero_grad() + + step += 1 + + if step % log_freq == 0: + accelerator.print(f"step: {step} loss: {loss.item():.3f}") + + if step >= training_steps: + done = True + break + + # Unwrap the policy of its distributed training wrapper and save it. + unwrapped_policy = accelerator.unwrap_model(policy) + unwrapped_policy.save_pretrained(output_directory) + + accelerator.print("Finished offline training") + + +# We need to add a call to the training function in order to be able to use the Accelerator. +if __name__ == "__main__": + train() diff --git a/examples/1_load_lerobot_dataset.py b/examples/1_load_lerobot_dataset.py index 3846926a6..96c104b68 100644 --- a/examples/1_load_lerobot_dataset.py +++ b/examples/1_load_lerobot_dataset.py @@ -3,78 +3,120 @@ It illustrates how to load datasets, manipulate them, and apply transformations suitable for machine learning tasks in PyTorch. Features included in this script: -- Loading a dataset and accessing its properties. -- Filtering data by episode number. -- Converting tensor data for visualization. -- Saving video files from dataset frames. +- Viewing a dataset's metadata and exploring its properties. +- Loading an existing dataset from the hub or a subset of it. +- Accessing frames by episode number. - Using advanced dataset features like timestamp-based frame selection. - Demonstrating compatibility with PyTorch DataLoader for batch processing. The script ends with examples of how to batch process data using PyTorch's DataLoader. 
""" -from pathlib import Path from pprint import pprint -import imageio import torch +from huggingface_hub import HfApi import lerobot -from lerobot.common.datasets.lerobot_dataset import LeRobotDataset +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata +# We ported a number of existing datasets ourselves, use this to see the list: print("List of available datasets:") pprint(lerobot.available_datasets) -# Let's take one for this example -repo_id = "lerobot/pusht" - -# You can easily load a dataset from a Hugging Face repository +# You can also browse through the datasets created/ported by the community on the hub using the hub api: +hub_api = HfApi() +repo_ids = [info.id for info in hub_api.list_datasets(task_categories="robotics", tags=["LeRobot"])] +pprint(repo_ids) + +# Or simply explore them in your web browser directly at: +# https://huggingface.co/datasets?other=LeRobot + +# Let's take this one for this example +repo_id = "lerobot/aloha_mobile_cabinet" +# We can have a look and fetch its metadata to know more about it: +ds_meta = LeRobotDatasetMetadata(repo_id) + +# By instantiating just this class, you can quickly access useful information about the content and the +# structure of the dataset without downloading the actual data yet (only metadata files — which are +# lightweight). +print(f"Total number of episodes: {ds_meta.total_episodes}") +print(f"Average number of frames per episode: {ds_meta.total_frames / ds_meta.total_episodes:.3f}") +print(f"Frames per second used during data collection: {ds_meta.fps}") +print(f"Robot type: {ds_meta.robot_type}") +print(f"keys to access images from cameras: {ds_meta.camera_keys=}\n") + +print("Tasks:") +print(ds_meta.tasks) +print("Features:") +pprint(ds_meta.features) + +# You can also get a short summary by simply printing the object: +print(ds_meta) + +# You can then load the actual dataset from the hub. +# Either load any subset of episodes: +dataset = LeRobotDataset(repo_id, episodes=[0, 10, 11, 23]) + +# And see how many frames you have: +print(f"Selected episodes: {dataset.episodes}") +print(f"Number of episodes selected: {dataset.num_episodes}") +print(f"Number of frames selected: {dataset.num_frames}") + +# Or simply load the entire dataset: dataset = LeRobotDataset(repo_id) +print(f"Number of episodes selected: {dataset.num_episodes}") +print(f"Number of frames selected: {dataset.num_frames}") -# LeRobotDataset is actually a thin wrapper around an underlying Hugging Face dataset -# (see https://huggingface.co/docs/datasets/index for more information). -print(dataset) -print(dataset.hf_dataset) +# The previous metadata class is contained in the 'meta' attribute of the dataset: +print(dataset.meta) -# And provides additional utilities for robotics and compatibility with Pytorch -print(f"\naverage number of frames per episode: {dataset.num_samples / dataset.num_episodes:.3f}") -print(f"frames per second used during data collection: {dataset.fps=}") -print(f"keys to access images from cameras: {dataset.camera_keys=}\n") +# LeRobotDataset actually wraps an underlying Hugging Face dataset +# (see https://huggingface.co/docs/datasets for more information). +print(dataset.hf_dataset) -# Access frame indexes associated to first episode +# LeRobot datasets also subclasses PyTorch datasets so you can do everything you know and love from working +# with the latter, like iterating through the dataset. +# The __getitem__ iterates over the frames of the dataset. 
Since our datasets are also structured by
+# episodes, you can access the frame indices of any episode using the episode_data_index. Here, we access
+# frame indices associated with the first episode:
 episode_index = 0
 from_idx = dataset.episode_data_index["from"][episode_index].item()
 to_idx = dataset.episode_data_index["to"][episode_index].item()
 
-# LeRobot datasets actually subclass PyTorch datasets so you can do everything you know and love from working
-# with the latter, like iterating through the dataset. Here we grab all the image frames.
-frames = [dataset[idx]["observation.image"] for idx in range(from_idx, to_idx)]
+# Then we grab all the image frames from the first camera:
+camera_key = dataset.meta.camera_keys[0]
+frames = [dataset[idx][camera_key] for idx in range(from_idx, to_idx)]
 
-# Video frames are now float32 in range [0,1] channel first (c,h,w) to follow pytorch convention. To visualize
-# them, we convert to uint8 in range [0,255]
-frames = [(frame * 255).type(torch.uint8) for frame in frames]
-# and to channel last (h,w,c).
-frames = [frame.permute((1, 2, 0)).numpy() for frame in frames]
+# The objects returned by the dataset are all torch.Tensors
+print(type(frames[0]))
+print(frames[0].shape)
 
-# Finally, we save the frames to a mp4 video for visualization.
-Path("outputs/examples/1_load_lerobot_dataset").mkdir(parents=True, exist_ok=True)
-imageio.mimsave("outputs/examples/1_load_lerobot_dataset/episode_0.mp4", frames, fps=dataset.fps)
+# Since we're using pytorch, the shape is in PyTorch's channel-first convention (c, h, w).
+# We can compare this shape with the information available for that feature:
+pprint(dataset.features[camera_key])
+# In particular:
+print(dataset.features[camera_key]["shape"])
+# The shape is in (h, w, c), which is a more universal format.
 
 # For many machine learning applications we need to load the history of past observations or trajectories of
 # future actions. Our datasets can load previous and future frames for each key/modality, using timestamps
 # differences with the current loaded frame. For instance:
 delta_timestamps = {
     # loads 4 images: 1 second before current frame, 500 ms before, 200 ms before, and current frame
-    "observation.image": [-1, -0.5, -0.20, 0],
-    # loads 8 state vectors: 1.5 seconds before, 1 second before, ... 20 ms, 10 ms, and current frame
-    "observation.state": [-1.5, -1, -0.5, -0.20, -0.10, -0.02, -0.01, 0],
+    camera_key: [-1, -0.5, -0.20, 0],
+    # loads 6 state vectors: 1.5 seconds before, 1 second before, ... 200 ms, 100 ms, and current frame
+    "observation.state": [-1.5, -1, -0.5, -0.20, -0.10, 0],
     # loads 64 action vectors: current frame, 1 frame in the future, 2 frames, ... 63 frames in the future
     "action": [t / dataset.fps for t in range(64)],
 }
+# Note that in any case, these delta_timestamps values need to be multiples of (1/fps) so that, added to any
+# timestamp, you still get a valid timestamp.
+
 dataset = LeRobotDataset(repo_id, delta_timestamps=delta_timestamps)
-print(f"\n{dataset[0]['observation.image'].shape=}")  # (4,c,h,w)
-print(f"{dataset[0]['observation.state'].shape=}")  # (8,c)
-print(f"{dataset[0]['action'].shape=}\n")  # (64,c)
+print(f"\n{dataset[0][camera_key].shape=}")  # (4, c, h, w)
+print(f"{dataset[0]['observation.state'].shape=}")  # (6, c)
+print(f"{dataset[0]['action'].shape=}\n")  # (64, c)
 
 # Finally, our datasets are fully compatible with PyTorch dataloaders and samplers because they are just
 # PyTorch datasets.
@@ -84,8 +126,9 @@
     batch_size=32,
     shuffle=True,
 )
+
 for batch in dataloader:
-    print(f"{batch['observation.image'].shape=}")  # (32,4,c,h,w)
-    print(f"{batch['observation.state'].shape=}")  # (32,8,c)
-    print(f"{batch['action'].shape=}")  # (32,64,c)
+    print(f"{batch[camera_key].shape=}")  # (32, 4, c, h, w)
+    print(f"{batch['observation.state'].shape=}")  # (32, 6, c)
+    print(f"{batch['action'].shape=}")  # (32, 64, c)
     break
diff --git a/examples/3_train_policy.py b/examples/3_train_policy.py
index c5ce0d184..935ab2dbf 100644
--- a/examples/3_train_policy.py
+++ b/examples/3_train_policy.py
@@ -40,7 +40,7 @@
 # For this example, no arguments need to be passed because the defaults are set up for PushT.
 # If you're doing something different, you will likely need to change at least some of the defaults.
 cfg = DiffusionConfig()
-policy = DiffusionPolicy(cfg, dataset_stats=dataset.stats)
+policy = DiffusionPolicy(cfg, dataset_stats=dataset.meta.stats)
 policy.train()
 policy.to(device)
diff --git a/examples/6_add_image_transforms.py b/examples/6_add_image_transforms.py
index bdcc6d7b9..82b70f5c1 100644
--- a/examples/6_add_image_transforms.py
+++ b/examples/6_add_image_transforms.py
@@ -1,7 +1,7 @@
 """
 This script demonstrates how to use torchvision's image transformation with LeRobotDataset for data
 augmentation purposes. The transformations are passed to the dataset as an argument upon creation, and
-transforms are applied to the observation images before they are returned in the dataset's __get_item__.
+transforms are applied to the observation images before they are returned in the dataset's __getitem__.
 """
 
 from pathlib import Path
@@ -20,7 +20,7 @@
 first_idx = dataset.episode_data_index["from"][0].item()
 
 # Get the frame corresponding to the first camera
-frame = dataset[first_idx][dataset.camera_keys[0]]
+frame = dataset[first_idx][dataset.meta.camera_keys[0]]
 
 # Define the transformations
@@ -36,7 +36,7 @@
 transformed_dataset = LeRobotDataset(dataset_repo_id, image_transforms=transforms)
 
 # Get a frame from the transformed dataset
-transformed_frame = transformed_dataset[first_idx][transformed_dataset.camera_keys[0]]
+transformed_frame = transformed_dataset[first_idx][transformed_dataset.meta.camera_keys[0]]
 
 # Create a directory to store output images
 output_dir = Path("outputs/image_transforms")
diff --git a/examples/7_get_started_with_real_robot.md b/examples/7_get_started_with_real_robot.md
index 50a2c6452..76408275d 100644
--- a/examples/7_get_started_with_real_robot.md
+++ b/examples/7_get_started_with_real_robot.md
@@ -11,7 +11,7 @@ This tutorial will guide you through the process of setting up and training a ne
 By following these steps, you'll be able to replicate tasks like picking up a Lego block and placing it in a bin with a high success rate, as demonstrated in [this video](https://x.com/RemiCadene/status/1814680760592572934).
 
-This tutorial is specifically made for the affordable [Koch v1.1](https://github.com/jess-moss/koch-v1-1) robot, but it contains additional information to be easily adapted to various types of robots like [Aloha bimanual robot](aloha-2.github.io) by changing some configurations. The Koch v1.1 consists of a leader arm and a follower arm, each with 6 motors. It can work with one or several cameras to record the scene, which serve as visual sensors for the robot.
+This tutorial is specifically made for the affordable [Koch v1.1](https://github.com/jess-moss/koch-v1-1) robot, but it contains additional information to be easily adapted to various types of robots like [Aloha bimanual robot](https://aloha-2.github.io) by changing some configurations. The Koch v1.1 consists of a leader arm and a follower arm, each with 6 motors. It can work with one or several cameras to record the scene, which serve as visual sensors for the robot.
 
 During the data collection phase, you will control the follower arm by moving the leader arm. This process is known as "teleoperation." This technique is used to collect robot trajectories. Afterward, you'll train a neural network to imitate these trajectories and deploy the network to enable your robot to operate autonomously.
 
@@ -45,7 +45,7 @@ poetry install --sync --extras "dynamixel"
 ```bash
 conda install -c conda-forge ffmpeg
 pip uninstall opencv-python
-conda install -c conda-forge opencv>=4.10.0
+conda install -c conda-forge "opencv>=4.10.0"
 ```
 
 You are now ready to plug the 5V power supply to the motor bus of the leader arm (the smaller one) since all its motors only require 5V.
@@ -78,12 +78,12 @@ To begin, create two instances of the [`DynamixelMotorsBus`](../lerobot/common/
 To find the correct ports for each arm, run the utility script twice:
 ```bash
-python lerobot/common/robot_devices/motors/dynamixel.py
+python lerobot/scripts/find_motors_bus_port.py
 ```
 
 Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
 ```
-Finding all available ports for the DynamixelMotorsBus.
+Finding all available ports for the MotorBus.
 ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
 Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
@@ -95,7 +95,7 @@ Reconnect the usb cable.
 Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
 ```
-Finding all available ports for the DynamixelMotorsBus.
+Finding all available ports for the MotorBus.
 ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
 Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
@@ -778,7 +778,6 @@ Now run this to record 2 episodes:
 python lerobot/scripts/control_robot.py record \
   --robot-path lerobot/configs/robot/koch.yaml \
   --fps 30 \
-  --root data \
   --repo-id ${HF_USER}/koch_test \
   --tags tutorial \
   --warmup-time-s 5 \
@@ -787,7 +786,7 @@ python lerobot/scripts/control_robot.py record \
   --num-episodes 2
 ```
 
-This will write your dataset locally to `{root}/{repo-id}` (e.g. `data/cadene/koch_test`) and push it on the hub at `https://huggingface.co/datasets/{HF_USER}/{repo-id}`. Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example).
+This will write your dataset locally to `~/.cache/huggingface/lerobot/{repo-id}` (e.g. `~/.cache/huggingface/lerobot/cadene/koch_test`) and push it to the hub at `https://huggingface.co/datasets/{HF_USER}/{repo-id}`. Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example).
 
 You can look for other LeRobot datasets on the hub by searching for `LeRobot` tags: https://huggingface.co/datasets?other=LeRobot
@@ -840,7 +839,6 @@ In the coming months, we plan to release a foundational model for robotics.
We a You can visualize your dataset by running: ```bash python lerobot/scripts/visualize_dataset_html.py \ - --root data \ --repo-id ${HF_USER}/koch_test ``` @@ -858,7 +856,6 @@ To replay the first episode of the dataset you just recorded, run the following python lerobot/scripts/control_robot.py replay \ --robot-path lerobot/configs/robot/koch.yaml \ --fps 30 \ - --root data \ --repo-id ${HF_USER}/koch_test \ --episode 0 ``` @@ -871,7 +868,7 @@ Your robot should replicate movements similar to those you recorded. For example To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command: ```bash -DATA_DIR=data python lerobot/scripts/train.py \ +python lerobot/scripts/train.py \ dataset_repo_id=${HF_USER}/koch_test \ policy=act_koch_real \ env=koch_real \ @@ -918,7 +915,6 @@ env: It should match your dataset (e.g. `fps: 30`) and your robot (e.g. `state_dim: 6` and `action_dim: 6`). We are still working on simplifying this in future versions of `lerobot`. 4. We provided `device=cuda` since we are training on a Nvidia GPU, but you could use `device=mps` to train on Apple silicon. 5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`. -6. We added `DATA_DIR=data` to access your dataset stored in your local `data` directory. If you dont provide `DATA_DIR`, your dataset will be downloaded from Hugging Face hub to your cache folder `$HOME/.cache/hugginface`. In future versions of `lerobot`, both directories will be in sync. For more information on the `train` script see the previous tutorial: [`examples/4_train_policy_with_script.md`](../examples/4_train_policy_with_script.md) @@ -991,7 +987,6 @@ To this end, you can use the `record` function from [`lerobot/scripts/control_ro python lerobot/scripts/control_robot.py record \ --robot-path lerobot/configs/robot/koch.yaml \ --fps 30 \ - --root data \ --repo-id ${HF_USER}/eval_koch_test \ --tags tutorial eval \ --warmup-time-s 5 \ @@ -1010,7 +1005,6 @@ As you can see, it's almost the same command as previously used to record your t You can then visualize your evaluation dataset by running the same command as before but with the new inference dataset as argument: ```bash python lerobot/scripts/visualize_dataset.py \ - --root data \ --repo-id ${HF_USER}/eval_koch_test ``` diff --git a/examples/8_use_stretch.md b/examples/8_use_stretch.md new file mode 100644 index 000000000..c2c306f07 --- /dev/null +++ b/examples/8_use_stretch.md @@ -0,0 +1,156 @@ +This tutorial explains how to use [Stretch 3](https://hello-robot.com/stretch-3-product) with LeRobot. + +## Setup + +Familiarize yourself with Stretch by following its [tutorials](https://docs.hello-robot.com/0.3/getting_started/hello_robot/) (recommended). + +To use LeRobot on Stretch, 3 options are available: +- [tethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup) +- [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup) +- ssh directly into Stretch (you will first need to install and configure openssh-server on stretch using one of the two above setups) + + +## Install LeRobot + +On Stretch's CLI, follow these steps: + +1. 
[Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
+```bash
+mkdir -p ~/miniconda3
+wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
+bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
+rm ~/miniconda3/miniconda.sh
+~/miniconda3/bin/conda init bash
+```
+
+2. Comment out these lines in `~/.profile` (they can mess up the paths used by conda, and `~/.local/bin` should already be in your PATH)
+```
+# set PATH so it includes user's private bin if it exists
+if [ -d "$HOME/.local/bin" ] ; then
+    PATH="$HOME/.local/bin:$PATH"
+fi
+```
+
+3. Restart shell or `source ~/.bashrc`
+
+4. Create and activate a fresh conda environment for lerobot
+```bash
+conda create -y -n lerobot python=3.10 && conda activate lerobot
+```
+
+5. Clone LeRobot:
+```bash
+git clone https://github.com/huggingface/lerobot.git ~/lerobot
+```
+
+6. Install LeRobot with stretch dependencies:
+```bash
+cd ~/lerobot && pip install -e ".[stretch]"
+```
+
+> **Note:** If you get this message, you can ignore it: `ERROR: pip's dependency resolver does not currently take into account all the packages that are installed.`
+
+For Linux only (not Mac), install extra dependencies for recording datasets:
+```bash
+conda install -y -c conda-forge ffmpeg
+pip uninstall -y opencv-python
+conda install -y -c conda-forge "opencv>=4.10.0"
+```
+
+7. Run a [system check](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#system-check) to make sure your robot is ready:
+```bash
+stretch_system_check.py
+```
+
+> **Note:** You may need to free the "robot process" after booting Stretch by running `stretch_free_robot_process.py`. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#turning-off-gamepad-teleoperation).
+
+You should get something like this:
+```bash
+For use with S T R E T C H (R) from Hello Robot Inc.
+---------------------------------------------------------------------
+
+Model = Stretch 3
+Tool = DexWrist 3 w/ Gripper
+Serial Number = stretch-se3-3054
+
+---- Checking Hardware ----
+[Pass] Comms are ready
+[Pass] Actuators are ready
+[Warn] Sensors not ready (IMU AZ = -10.19 out of range -10.1 to -9.5)
+[Pass] Battery voltage is 13.6 V
+
+---- Checking Software ----
+[Pass] Ubuntu 22.04 is ready
+[Pass] All APT pkgs are setup correctly
+[Pass] Firmware is up-to-date
+[Pass] Python pkgs are up-to-date
+[Pass] ROS2 Humble is ready
+```
+
+## Teleoperate, record a dataset and run a policy
+
+**Calibrate (Optional)**
+Before operating Stretch, you need to [home](https://docs.hello-robot.com/0.3/getting_started/stretch_hardware_overview/#homing) it first. Be mindful about giving Stretch some space, as this procedure will move the robot's arm and gripper. Now run this command:
+```bash
+python lerobot/scripts/control_robot.py calibrate \
+  --robot-path lerobot/configs/robot/stretch.yaml
+```
+This is equivalent to running `stretch_robot_home.py`.
+
+> **Note:** If you run any of the LeRobot scripts below and Stretch is not properly homed, it will automatically home/calibrate first.
+
+**Teleoperate**
+Before trying teleoperation, you need to activate the gamepad controller by pressing the middle button. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/hello_robot/#gamepad-teleoperation).
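+
+If the robot does not respond, another process may still be holding it; you can free it first with the script mentioned in the note above:
+```bash
+stretch_free_robot_process.py
+```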
+
+Now try out teleoperation (see above documentation to learn about the gamepad controls):
+```bash
+python lerobot/scripts/control_robot.py teleoperate \
+  --robot-path lerobot/configs/robot/stretch.yaml
+```
+This is essentially the same as running `stretch_gamepad_teleop.py`.
+
+**Record a dataset**
+Once you're familiar with the gamepad controls and after a bit of practice, you can try to record your first dataset with Stretch.
+
+If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
+```bash
+huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
+```
+
+Store your Hugging Face repository name in a variable to run these commands:
+```bash
+HF_USER=$(huggingface-cli whoami | head -n 1)
+echo $HF_USER
+```
+
+Record one episode:
+```bash
+python lerobot/scripts/control_robot.py record \
+  --robot-path lerobot/configs/robot/stretch.yaml \
+  --fps 20 \
+  --repo-id ${HF_USER}/stretch_test \
+  --tags stretch tutorial \
+  --warmup-time-s 3 \
+  --episode-time-s 40 \
+  --reset-time-s 10 \
+  --num-episodes 1 \
+  --push-to-hub 0
+```
+
+> **Note:** If you're using ssh to connect to Stretch and run this script, you won't be able to visualize its camera feeds (though they will still be recorded). To see the camera streams, use the [tethered](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#tethered-setup) or [untethered setup](https://docs.hello-robot.com/0.3/getting_started/connecting_to_stretch/#untethered-setup).
+
+**Replay an episode**
+Now try to replay this episode (make sure the robot's initial position is the same):
+```bash
+python lerobot/scripts/control_robot.py replay \
+  --robot-path lerobot/configs/robot/stretch.yaml \
+  --fps 20 \
+  --repo-id ${HF_USER}/stretch_test \
+  --episode 0
+```
+
+Follow the [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) to train a policy on your data and run inference on your robot. You will need to adapt the code for Stretch.
+
+> TODO(rcadene, aliberts): Add already setup environment and policy yaml configuration files
+
+If you need help, please reach out on Discord in the channel `#stretch3-mobile-arm`.
diff --git a/examples/9_use_aloha.md b/examples/9_use_aloha.md
new file mode 100644
index 000000000..1abf7c495
--- /dev/null
+++ b/examples/9_use_aloha.md
@@ -0,0 +1,174 @@
+This tutorial explains how to use [Aloha and Aloha 2 stationary](https://www.trossenrobotics.com/aloha-stationary) with LeRobot.
+
+## Setup
+
+Follow the [documentation from Trossen Robotics](https://docs.trossenrobotics.com/aloha_docs/getting_started/stationary/hardware_setup.html) for setting up the hardware and plugging the 4 arms and 4 cameras into your computer.
+
+
+## Install LeRobot
+
+On your computer:
+
+1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install):
+```bash
+mkdir -p ~/miniconda3
+wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
+bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
+rm ~/miniconda3/miniconda.sh
+~/miniconda3/bin/conda init bash
+```
+
+2. Restart shell or `source ~/.bashrc`
+
+3. Create and activate a fresh conda environment for lerobot
+```bash
+conda create -y -n lerobot python=3.10 && conda activate lerobot
+```
+
+4. Clone LeRobot:
+```bash
+git clone https://github.com/huggingface/lerobot.git ~/lerobot
+```
+
+5. Install LeRobot with dependencies for the Aloha motors (dynamixel) and cameras (intelrealsense):
+```bash
+cd ~/lerobot && pip install -e ".[dynamixel, intelrealsense]"
+```
+
+For Linux only (not Mac), install extra dependencies for recording datasets:
+```bash
+conda install -y -c conda-forge ffmpeg
+pip uninstall -y opencv-python
+conda install -y -c conda-forge "opencv>=4.10.0"
+```
+
+## Teleoperate
+
+**/!\ FOR SAFETY, READ THIS /!\**
+Teleoperation consists of manually operating the leader arms to move the follower arms. Importantly:
+1. Make sure your leader arms are in the same position as the follower arms, so that the follower arms don't move too fast to match the leader arms,
+2. Our code assumes that your robot has been assembled following Trossen Robotics instructions. This allows us to skip calibration, as we use the pre-defined calibration files in `.cache/calibration/aloha_default`. If you replace a motor, make sure you follow the exact instructions from Trossen Robotics.
+
+By running the following code, you can start your first **SAFE** teleoperation:
+```bash
+python lerobot/scripts/control_robot.py teleoperate \
+  --robot-path lerobot/configs/robot/aloha.yaml \
+  --robot-overrides max_relative_target=5
+```
+
+By adding `--robot-overrides max_relative_target=5`, we override the default value for `max_relative_target` defined in `lerobot/configs/robot/aloha.yaml`. It is expected to be `5` to limit the magnitude of the movement for more safety, but the teleoperation won't be smooth. When you feel confident, you can disable this limit by adding `--robot-overrides max_relative_target=null` to the command line:
+```bash
+python lerobot/scripts/control_robot.py teleoperate \
+  --robot-path lerobot/configs/robot/aloha.yaml \
+  --robot-overrides max_relative_target=null
+```
+
+## Record a dataset
+
+Once you're familiar with teleoperation, you can record your first dataset with Aloha.
+
+If you want to use the Hugging Face hub features for uploading your dataset and you haven't previously done it, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
+```bash
+huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
+```
+
+Store your Hugging Face repository name in a variable to run these commands:
+```bash
+HF_USER=$(huggingface-cli whoami | head -n 1)
+echo $HF_USER
+```
+
+Record 2 episodes and upload your dataset to the hub:
+```bash
+python lerobot/scripts/control_robot.py record \
+  --robot-path lerobot/configs/robot/aloha.yaml \
+  --robot-overrides max_relative_target=null \
+  --fps 30 \
+  --repo-id ${HF_USER}/aloha_test \
+  --tags aloha tutorial \
+  --warmup-time-s 5 \
+  --episode-time-s 40 \
+  --reset-time-s 10 \
+  --num-episodes 2 \
+  --push-to-hub 1
+```
+
+## Visualize a dataset
+
+If you uploaded your dataset to the hub with `--push-to-hub 1`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id given by:
+```bash
+echo ${HF_USER}/aloha_test
+```
+
+If you didn't upload to the hub (i.e. you used `--push-to-hub 0`), you can also visualize it locally with:
+```bash
+python lerobot/scripts/visualize_dataset_html.py \
+  --repo-id ${HF_USER}/aloha_test
+```
+
+## Replay an episode
+
+**/!\ FOR SAFETY, READ THIS /!\**
+Replay consists of automatically replaying the sequence of actions (i.e. goal positions for your motors) recorded in a given dataset episode. Make sure the current initial position of your robot is similar to the one in your episode, so that your follower arms don't move too fast to go to the first goal positions. For safety, you might want to add `--robot-overrides max_relative_target=5` to your command line as explained above.
+
+Now try to replay the first episode on your robot:
+```bash
+python lerobot/scripts/control_robot.py replay \
+  --robot-path lerobot/configs/robot/aloha.yaml \
+  --robot-overrides max_relative_target=null \
+  --fps 30 \
+  --repo-id ${HF_USER}/aloha_test \
+  --episode 0
+```
+
+## Train a policy
+
+To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
+```bash
+python lerobot/scripts/train.py \
+  dataset_repo_id=${HF_USER}/aloha_test \
+  policy=act_aloha_real \
+  env=aloha_real \
+  hydra.run.dir=outputs/train/act_aloha_test \
+  hydra.job.name=act_aloha_test \
+  device=cuda \
+  wandb.enable=true
+```
+
+Let's explain it:
+1. We provided the dataset as argument with `dataset_repo_id=${HF_USER}/aloha_test`.
+2. We provided the policy with `policy=act_aloha_real`. This loads configurations from [`lerobot/configs/policy/act_aloha_real.yaml`](../lerobot/configs/policy/act_aloha_real.yaml). Importantly, this policy uses 4 cameras as input: `cam_right_wrist`, `cam_left_wrist`, `cam_high`, and `cam_low`.
+3. We provided an environment as argument with `env=aloha_real`. This loads configurations from [`lerobot/configs/env/aloha_real.yaml`](../lerobot/configs/env/aloha_real.yaml). Note: this yaml defines 18 dimensions for the `state_dim` and `action_dim`, corresponding to 18 motors, not 14 motors as used in previous Aloha work. This is because we include the `shoulder_shadow` and `elbow_shadow` motors for simplicity.
+4. We provided `device=cuda` since we are training on an Nvidia GPU.
+5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`.
+
+Training should take several hours. You will find checkpoints in `outputs/train/act_aloha_test/checkpoints`.
+
+## Evaluate your policy
+
+You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
+```bash
+python lerobot/scripts/control_robot.py record \
+  --robot-path lerobot/configs/robot/aloha.yaml \
+  --robot-overrides max_relative_target=null \
+  --fps 30 \
+  --repo-id ${HF_USER}/eval_act_aloha_test \
+  --tags aloha tutorial eval \
+  --warmup-time-s 5 \
+  --episode-time-s 40 \
+  --reset-time-s 10 \
+  --num-episodes 10 \
+  --num-image-writer-processes 1 \
+  -p outputs/train/act_aloha_test/checkpoints/last/pretrained_model
+```
+
+As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
+1. There is an additional `-p` argument which indicates the path to your policy checkpoint (e.g. `-p outputs/train/eval_aloha_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `-p ${HF_USER}/act_aloha_test`).
+2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `--repo-id ${HF_USER}/eval_act_aloha_test`).
+3. We use `--num-image-writer-processes 1` instead of the default value (`0`). On our computer, using a dedicated process to write images from the 4 cameras on disk allows us to reach a constant 30 fps during inference. Feel free to explore different values for `--num-image-writer-processes`.
+
+## More
+
+Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth explanation.
+
+If you have any questions or need help, please reach out on Discord in the channel `#aloha-arm`.
diff --git a/examples/advanced/2_calculate_validation_loss.py b/examples/advanced/2_calculate_validation_loss.py
index 1428014b6..00ba9930f 100644
--- a/examples/advanced/2_calculate_validation_loss.py
+++ b/examples/advanced/2_calculate_validation_loss.py
@@ -14,7 +14,7 @@
 import torch
 from huggingface_hub import snapshot_download
 
-from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
+from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
 from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
 
 device = torch.device("cuda")
@@ -41,26 +41,20 @@
 }
 
 # Load the last 10% of episodes of the dataset as a validation set.
-# - Load full dataset
-full_dataset = LeRobotDataset("lerobot/pusht", split="train")
-# - Calculate train and val subsets
-num_train_episodes = math.floor(full_dataset.num_episodes * 90 / 100)
-num_val_episodes = full_dataset.num_episodes - num_train_episodes
-print(f"Number of episodes in full dataset: {full_dataset.num_episodes}")
-print(f"Number of episodes in training dataset (90% subset): {num_train_episodes}")
-print(f"Number of episodes in validation dataset (10% subset): {num_val_episodes}")
-# - Get first frame index of the validation set
-first_val_frame_index = full_dataset.episode_data_index["from"][num_train_episodes].item()
-# - Load frames subset belonging to validation set using the `split` argument.
-# It utilizes the `datasets` library's syntax for slicing datasets.
-# For more information on the Slice API, please see: -# https://huggingface.co/docs/datasets/v2.19.0/loading#slice-splits -train_dataset = LeRobotDataset( - "lerobot/pusht", split=f"train[:{first_val_frame_index}]", delta_timestamps=delta_timestamps -) -val_dataset = LeRobotDataset( - "lerobot/pusht", split=f"train[{first_val_frame_index}:]", delta_timestamps=delta_timestamps -) +# - Load dataset metadata +dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht") +# - Calculate train and val episodes +total_episodes = dataset_metadata.total_episodes +episodes = list(range(dataset_metadata.total_episodes)) +num_train_episodes = math.floor(total_episodes * 90 / 100) +train_episodes = episodes[:num_train_episodes] +val_episodes = episodes[num_train_episodes:] +print(f"Number of episodes in full dataset: {total_episodes}") +print(f"Number of episodes in training dataset (90% subset): {len(train_episodes)}") +print(f"Number of episodes in validation dataset (10% subset): {len(val_episodes)}") +# - Load train an val datasets +train_dataset = LeRobotDataset("lerobot/pusht", episodes=train_episodes, delta_timestamps=delta_timestamps) +val_dataset = LeRobotDataset("lerobot/pusht", episodes=val_episodes, delta_timestamps=delta_timestamps) print(f"Number of frames in training dataset (90% subset): {len(train_dataset)}") print(f"Number of frames in validation dataset (10% subset): {len(val_dataset)}") diff --git a/examples/port_datasets/pusht_zarr.py b/examples/port_datasets/pusht_zarr.py new file mode 100644 index 000000000..60df98405 --- /dev/null +++ b/examples/port_datasets/pusht_zarr.py @@ -0,0 +1,222 @@ +import shutil +from pathlib import Path + +import numpy as np +import torch + +from lerobot.common.datasets.lerobot_dataset import LEROBOT_HOME, LeRobotDataset +from lerobot.common.datasets.push_dataset_to_hub._download_raw import download_raw + +PUSHT_TASK = "Push the T-shaped blue block onto the T-shaped green target surface." +PUSHT_FEATURES = { + "observation.state": { + "dtype": "float32", + "shape": (2,), + "names": { + "axes": ["x", "y"], + }, + }, + "action": { + "dtype": "float32", + "shape": (2,), + "names": { + "axes": ["x", "y"], + }, + }, + "next.reward": { + "dtype": "float32", + "shape": (1,), + "names": None, + }, + "next.success": { + "dtype": "bool", + "shape": (1,), + "names": None, + }, + "observation.environment_state": { + "dtype": "float32", + "shape": (16,), + "names": [ + "keypoints", + ], + }, + "observation.image": { + "dtype": None, + "shape": (3, 96, 96), + "names": [ + "channel", + "height", + "width", + ], + }, +} + + +def build_features(mode: str) -> dict: + features = PUSHT_FEATURES + if mode == "keypoints": + features.pop("observation.image") + else: + features.pop("observation.environment_state") + features["observation.image"]["dtype"] = mode + + return features + + +def load_raw_dataset(zarr_path: Path): + try: + from lerobot.common.datasets.push_dataset_to_hub._diffusion_policy_replay_buffer import ( + ReplayBuffer as DiffusionPolicyReplayBuffer, + ) + except ModuleNotFoundError as e: + print("`gym_pusht` is not installed. Please install it with `pip install 'lerobot[gym_pusht]'`") + raise e + + zarr_data = DiffusionPolicyReplayBuffer.copy_from_path(zarr_path) + return zarr_data + + +def calculate_coverage(zarr_data): + try: + import pymunk + from gym_pusht.envs.pusht import PushTEnv, pymunk_to_shapely + except ModuleNotFoundError as e: + print("`gym_pusht` is not installed. 
Please install it with `pip install 'lerobot[gym_pusht]'`") + raise e + + block_pos = zarr_data["state"][:, 2:4] + block_angle = zarr_data["state"][:, 4] + + num_frames = len(block_pos) + + coverage = np.zeros((num_frames,)) + # 8 keypoints with 2 coords each + keypoints = np.zeros((num_frames, 16)) + + # Set x, y, theta (in radians) + goal_pos_angle = np.array([256, 256, np.pi / 4]) + goal_body = PushTEnv.get_goal_pose_body(goal_pos_angle) + + for i in range(num_frames): + space = pymunk.Space() + space.gravity = 0, 0 + space.damping = 0 + + # Add walls. + walls = [ + PushTEnv.add_segment(space, (5, 506), (5, 5), 2), + PushTEnv.add_segment(space, (5, 5), (506, 5), 2), + PushTEnv.add_segment(space, (506, 5), (506, 506), 2), + PushTEnv.add_segment(space, (5, 506), (506, 506), 2), + ] + space.add(*walls) + + block_body, block_shapes = PushTEnv.add_tee(space, block_pos[i].tolist(), block_angle[i].item()) + goal_geom = pymunk_to_shapely(goal_body, block_body.shapes) + block_geom = pymunk_to_shapely(block_body, block_body.shapes) + intersection_area = goal_geom.intersection(block_geom).area + goal_area = goal_geom.area + coverage[i] = intersection_area / goal_area + keypoints[i] = torch.from_numpy(PushTEnv.get_keypoints(block_shapes).flatten()) + + return coverage, keypoints + + +def calculate_success(coverage: float, success_threshold: float): + return coverage > success_threshold + + +def calculate_reward(coverage: float, success_threshold: float): + return np.clip(coverage / success_threshold, 0, 1) + + +def main(raw_dir: Path, repo_id: str, mode: str = "video", push_to_hub: bool = True): + if mode not in ["video", "image", "keypoints"]: + raise ValueError(mode) + + if (LEROBOT_HOME / repo_id).exists(): + shutil.rmtree(LEROBOT_HOME / repo_id) + + if not raw_dir.exists(): + download_raw(raw_dir, repo_id="lerobot-raw/pusht_raw") + + zarr_data = load_raw_dataset(zarr_path=raw_dir / "pusht_cchi_v7_replay.zarr") + + env_state = zarr_data["state"][:] + agent_pos = env_state[:, :2] + + action = zarr_data["action"][:] + image = zarr_data["img"] # (b, h, w, c) + + episode_data_index = { + "from": np.concatenate(([0], zarr_data.meta["episode_ends"][:-1])), + "to": zarr_data.meta["episode_ends"], + } + + # Calculate success and reward based on the overlapping area + # of the T-object and the T-area. 
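+    # Coverage is the fraction of the goal region overlapped by the T block in each frame. A frame counts
+    # as a success when coverage exceeds the 95% threshold, and the reward is coverage divided by that
+    # threshold, clipped to [0, 1].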
+ coverage, keypoints = calculate_coverage(zarr_data) + success = calculate_success(coverage, success_threshold=0.95) + reward = calculate_reward(coverage, success_threshold=0.95) + + features = build_features(mode) + dataset = LeRobotDataset.create( + repo_id=repo_id, + fps=10, + robot_type="2d pointer", + features=features, + image_writer_threads=4, + ) + episodes = range(len(episode_data_index["from"])) + for ep_idx in episodes: + from_idx = episode_data_index["from"][ep_idx] + to_idx = episode_data_index["to"][ep_idx] + num_frames = to_idx - from_idx + + for frame_idx in range(num_frames): + i = from_idx + frame_idx + frame = { + "action": torch.from_numpy(action[i]), + # Shift reward and success by +1 until the last item of the episode + "next.reward": reward[i + (frame_idx < num_frames - 1)], + "next.success": success[i + (frame_idx < num_frames - 1)], + } + + frame["observation.state"] = torch.from_numpy(agent_pos[i]) + + if mode == "keypoints": + frame["observation.environment_state"] = torch.from_numpy(keypoints[i]) + else: + frame["observation.image"] = torch.from_numpy(image[i]) + + dataset.add_frame(frame) + + dataset.save_episode(task=PUSHT_TASK) + + dataset.consolidate() + + if push_to_hub: + dataset.push_to_hub() + + +if __name__ == "__main__": + # To try this script, modify the repo id with your own HuggingFace user (e.g cadene/pusht) + repo_id = "lerobot/pusht" + + modes = ["video", "image", "keypoints"] + # Uncomment if you want to try with a specific mode + # modes = ["video"] + # modes = ["image"] + # modes = ["keypoints"] + + raw_dir = Path("data/lerobot-raw/pusht_raw") + for mode in modes: + if mode in ["image", "keypoints"]: + repo_id += f"_{mode}" + + # download and load raw dataset, create LeRobotDataset, populate it, push to hub + main(raw_dir, repo_id=repo_id, mode=mode) + + # Uncomment if you want to load the local dataset and explore it + # dataset = LeRobotDataset(repo_id=repo_id, local_files_only=True) + # breakpoint() diff --git a/lerobot/__init__.py b/lerobot/__init__.py index aeae31008..3d5bb6aaa 100644 --- a/lerobot/__init__.py +++ b/lerobot/__init__.py @@ -28,6 +28,8 @@ print(lerobot.available_policies) print(lerobot.available_policies_per_env) print(lerobot.available_robots) + print(lerobot.available_cameras) + print(lerobot.available_motors) ``` When implementing a new dataset loadable with LeRobotDataset follow these steps: @@ -179,8 +181,8 @@ "lerobot/usc_cloth_sim", ] -available_datasets = list( - itertools.chain(*available_datasets_per_env.values(), available_real_world_datasets) +available_datasets = sorted( + set(itertools.chain(*available_datasets_per_env.values(), available_real_world_datasets)) ) # lists all available policies from `lerobot/common/policies` @@ -196,6 +198,20 @@ "koch", "koch_bimanual", "aloha", + "so100", + "moss", +] + +# lists all available cameras from `lerobot/common/robot_devices/cameras` +available_cameras = [ + "opencv", + "intelrealsense", +] + +# lists all available motors from `lerobot/common/robot_devices/motors` +available_motors = [ + "dynamixel", + "feetech", ] # keys and values refer to yaml files @@ -203,7 +219,9 @@ "aloha": ["act"], "pusht": ["diffusion", "vqbet"], "xarm": ["tdmpc"], - "dora_aloha_real": ["act_real"], + "koch_real": ["act_koch_real"], + "aloha_real": ["act_aloha_real"], + "dora_aloha_real": ["act_aloha_real"], } env_task_pairs = [(env, task) for env, tasks in available_tasks_per_env.items() for task in tasks] diff --git a/lerobot/common/datasets/card_template.md 
b/lerobot/common/datasets/card_template.md new file mode 100644 index 000000000..7ee27df95 --- /dev/null +++ b/lerobot/common/datasets/card_template.md @@ -0,0 +1,27 @@ +--- +# For reference on dataset card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1 +# Doc / guide: https://huggingface.co/docs/hub/datasets-cards +{{ card_data }} +--- + +This dataset was created using [LeRobot](https://github.com/huggingface/lerobot). + +## Dataset Description + +{{ dataset_description | default("", true) }} + +- **Homepage:** {{ url | default("[More Information Needed]", true)}} +- **Paper:** {{ paper | default("[More Information Needed]", true)}} +- **License:** {{ license | default("[More Information Needed]", true)}} + +## Dataset Structure + +{{ dataset_structure | default("[More Information Needed]", true)}} + +## Citation + +**BibTeX:** + +```bibtex +{{ citation_bibtex | default("[More Information Needed]", true)}} +``` diff --git a/lerobot/common/datasets/compute_stats.py b/lerobot/common/datasets/compute_stats.py index 208284465..c62116994 100644 --- a/lerobot/common/datasets/compute_stats.py +++ b/lerobot/common/datasets/compute_stats.py @@ -19,9 +19,6 @@ import einops import torch import tqdm -from datasets import Image - -from lerobot.common.datasets.video_utils import VideoFrame def get_stats_einops_patterns(dataset, num_workers=0): @@ -39,15 +36,13 @@ def get_stats_einops_patterns(dataset, num_workers=0): batch = next(iter(dataloader)) stats_patterns = {} - for key, feats_type in dataset.features.items(): - # NOTE: skip language_instruction embedding in stats computation - if key == "language_instruction": - continue + for key in dataset.features: # sanity check that tensors are not float64 assert batch[key].dtype != torch.float64 - if isinstance(feats_type, (VideoFrame, Image)): + # if isinstance(feats_type, (VideoFrame, Image)): + if key in dataset.meta.camera_keys: # sanity check that images are channel first _, c, h, w = batch[key].shape assert c < h and c < w, f"expect channel first images, but instead {batch[key].shape}" @@ -63,12 +58,12 @@ def get_stats_einops_patterns(dataset, num_workers=0): elif batch[key].ndim == 1: stats_patterns[key] = "b -> 1" else: - raise ValueError(f"{key}, {feats_type}, {batch[key].shape}") + raise ValueError(f"{key}, {batch[key].shape}") return stats_patterns -def compute_stats(dataset, batch_size=32, num_workers=16, max_num_samples=None): +def compute_stats(dataset, batch_size=8, num_workers=8, max_num_samples=None): """Compute mean/std and min/max statistics of all data keys in a LeRobotDataset.""" if max_num_samples is None: max_num_samples = len(dataset) @@ -175,39 +170,45 @@ def aggregate_stats(ls_datasets) -> dict[str, torch.Tensor]: """ data_keys = set() for dataset in ls_datasets: - data_keys.update(dataset.stats.keys()) + data_keys.update(dataset.meta.stats.keys()) stats = {k: {} for k in data_keys} for data_key in data_keys: for stat_key in ["min", "max"]: # compute `max(dataset_0["max"], dataset_1["max"], ...)` stats[data_key][stat_key] = einops.reduce( - torch.stack([d.stats[data_key][stat_key] for d in ls_datasets if data_key in d.stats], dim=0), + torch.stack( + [ds.meta.stats[data_key][stat_key] for ds in ls_datasets if data_key in ds.meta.stats], + dim=0, + ), "n ... 
-> ...", stat_key, ) - total_samples = sum(d.num_samples for d in ls_datasets if data_key in d.stats) + total_samples = sum(d.num_frames for d in ls_datasets if data_key in d.meta.stats) # Compute the "sum" statistic by multiplying each mean by the number of samples in the respective # dataset, then divide by total_samples to get the overall "mean". - # NOTE: the brackets around (d.num_samples / total_samples) are needed tor minimize the risk of + # NOTE: the brackets around (d.num_frames / total_samples) are needed tor minimize the risk of # numerical overflow! stats[data_key]["mean"] = sum( - d.stats[data_key]["mean"] * (d.num_samples / total_samples) + d.meta.stats[data_key]["mean"] * (d.num_frames / total_samples) for d in ls_datasets - if data_key in d.stats + if data_key in d.meta.stats ) # The derivation for standard deviation is a little more involved but is much in the same spirit as # the computation of the mean. # Given two sets of data where the statistics are known: # σ_combined = sqrt[ (n1 * (σ1^2 + d1^2) + n2 * (σ2^2 + d2^2)) / (n1 + n2) ] # where d1 = μ1 - μ_combined, d2 = μ2 - μ_combined - # NOTE: the brackets around (d.num_samples / total_samples) are needed tor minimize the risk of + # NOTE: the brackets around (d.num_frames / total_samples) are needed tor minimize the risk of # numerical overflow! stats[data_key]["std"] = torch.sqrt( sum( - (d.stats[data_key]["std"] ** 2 + (d.stats[data_key]["mean"] - stats[data_key]["mean"]) ** 2) - * (d.num_samples / total_samples) + ( + d.meta.stats[data_key]["std"] ** 2 + + (d.meta.stats[data_key]["mean"] - stats[data_key]["mean"]) ** 2 + ) + * (d.num_frames / total_samples) for d in ls_datasets - if data_key in d.stats + if data_key in d.meta.stats ) ) return stats diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index 96a353fbf..f6164ed1d 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -91,9 +91,9 @@ def make_dataset(cfg, split: str = "train") -> LeRobotDataset | MultiLeRobotData ) if isinstance(cfg.dataset_repo_id, str): + # TODO (aliberts): add 'episodes' arg from config after removing hydra dataset = LeRobotDataset( cfg.dataset_repo_id, - split=split, delta_timestamps=cfg.training.get("delta_timestamps"), image_transforms=image_transforms, video_backend=cfg.video_backend, @@ -101,7 +101,6 @@ def make_dataset(cfg, split: str = "train") -> LeRobotDataset | MultiLeRobotData else: dataset = MultiLeRobotDataset( cfg.dataset_repo_id, - split=split, delta_timestamps=cfg.training.get("delta_timestamps"), image_transforms=image_transforms, video_backend=cfg.video_backend, @@ -112,6 +111,6 @@ def make_dataset(cfg, split: str = "train") -> LeRobotDataset | MultiLeRobotData for stats_type, listconfig in stats_dict.items(): # example of stats_type: min, max, mean, std stats = OmegaConf.to_container(listconfig, resolve=True) - dataset.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32) + dataset.meta.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32) return dataset diff --git a/lerobot/common/datasets/image_writer.py b/lerobot/common/datasets/image_writer.py new file mode 100644 index 000000000..9564fb591 --- /dev/null +++ b/lerobot/common/datasets/image_writer.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import multiprocessing +import queue +import threading +from pathlib import Path + +import numpy as np +import PIL.Image +import torch + + +def safe_stop_image_writer(func): + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + dataset = kwargs.get("dataset", None) + image_writer = getattr(dataset, "image_writer", None) if dataset else None + if image_writer is not None: + print("Waiting for image writer to terminate...") + image_writer.stop() + raise e + + return wrapper + + +def image_array_to_image(image_array: np.ndarray) -> PIL.Image.Image: + # TODO(aliberts): handle 1 channel and 4 for depth images + if image_array.ndim == 3 and image_array.shape[0] in [1, 3]: + # Transpose from pytorch convention (C, H, W) to (H, W, C) + image_array = image_array.transpose(1, 2, 0) + if image_array.dtype != np.uint8: + # Assume the image is in [0, 1] range for floating-point data + image_array = np.clip(image_array, 0, 1) + image_array = (image_array * 255).astype(np.uint8) + return PIL.Image.fromarray(image_array) + + +def write_image(image: np.ndarray | PIL.Image.Image, fpath: Path): + try: + if isinstance(image, np.ndarray): + img = image_array_to_image(image) + elif isinstance(image, PIL.Image.Image): + img = image + else: + raise TypeError(f"Unsupported image type: {type(image)}") + img.save(fpath) + except Exception as e: + print(f"Error writing image {fpath}: {e}") + + +def worker_thread_loop(queue: queue.Queue): + while True: + item = queue.get() + if item is None: + queue.task_done() + break + image_array, fpath = item + write_image(image_array, fpath) + queue.task_done() + + +def worker_process(queue: queue.Queue, num_threads: int): + threads = [] + for _ in range(num_threads): + t = threading.Thread(target=worker_thread_loop, args=(queue,)) + t.daemon = True + t.start() + threads.append(t) + for t in threads: + t.join() + + +class AsyncImageWriter: + """ + This class abstract away the initialisation of processes or/and threads to + save images on disk asynchrounously, which is critical to control a robot and record data + at a high frame rate. + + When `num_processes=0`, it creates a threads pool of size `num_threads`. + When `num_processes>0`, it creates processes pool of size `num_processes`, where each subprocess starts + their own threads pool of size `num_threads`. + + The optimal number of processes and threads depends on your computer capabilities. + We advise to use 4 threads per camera with 0 processes. If the fps is not stable, try to increase or lower + the number of threads. If it is still not stable, try to use 1 subprocess, or more. 
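To make the intended usage concrete, here is a minimal editorial sketch (not part of the diff; the output directory and fake frames are purely illustrative) of driving `AsyncImageWriter` with the thread pool recommended above:

```python
from pathlib import Path

import numpy as np

from lerobot.common.datasets.image_writer import AsyncImageWriter

# num_processes=0 -> a pool of 4 threads in the current process.
writer = AsyncImageWriter(num_processes=0, num_threads=4)

out_dir = Path("outputs/example_frames")
out_dir.mkdir(parents=True, exist_ok=True)

for frame_index in range(10):
    # Fake (C, H, W) float frame in [0, 1]; a real frame would come from a camera.
    frame = np.random.rand(3, 480, 640).astype(np.float32)
    writer.save_image(image=frame, fpath=out_dir / f"frame_{frame_index:06d}.png")

writer.wait_until_done()  # block until the queue is flushed to disk
writer.stop()             # terminate the worker threads
```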
+ """ + + def __init__(self, num_processes: int = 0, num_threads: int = 1): + self.num_processes = num_processes + self.num_threads = num_threads + self.queue = None + self.threads = [] + self.processes = [] + self._stopped = False + + if num_threads <= 0 and num_processes <= 0: + raise ValueError("Number of threads and processes must be greater than zero.") + + if self.num_processes == 0: + # Use threading + self.queue = queue.Queue() + for _ in range(self.num_threads): + t = threading.Thread(target=worker_thread_loop, args=(self.queue,)) + t.daemon = True + t.start() + self.threads.append(t) + else: + # Use multiprocessing + self.queue = multiprocessing.JoinableQueue() + for _ in range(self.num_processes): + p = multiprocessing.Process(target=worker_process, args=(self.queue, self.num_threads)) + p.daemon = True + p.start() + self.processes.append(p) + + def save_image(self, image: torch.Tensor | np.ndarray | PIL.Image.Image, fpath: Path): + if isinstance(image, torch.Tensor): + # Convert tensor to numpy array to minimize main process time + image = image.cpu().numpy() + self.queue.put((image, fpath)) + + def wait_until_done(self): + self.queue.join() + + def stop(self): + if self._stopped: + return + + if self.num_processes == 0: + for _ in self.threads: + self.queue.put(None) + for t in self.threads: + t.join() + else: + num_nones = self.num_processes * self.num_threads + for _ in range(num_nones): + self.queue.put(None) + for p in self.processes: + p.join() + if p.is_alive(): + p.terminate() + self.queue.close() + self.queue.join_thread() + + self._stopped = True diff --git a/lerobot/common/datasets/lerobot_dataset.py b/lerobot/common/datasets/lerobot_dataset.py index eb76f78d6..b32cf7095 100644 --- a/lerobot/common/datasets/lerobot_dataset.py +++ b/lerobot/common/datasets/lerobot_dataset.py @@ -15,202 +15,946 @@ # limitations under the License. 
import logging import os +import shutil +from functools import cached_property from pathlib import Path from typing import Callable import datasets +import numpy as np +import PIL.Image import torch import torch.utils +from datasets import load_dataset +from huggingface_hub import create_repo, snapshot_download, upload_folder -from lerobot.common.datasets.compute_stats import aggregate_stats +from lerobot.common.datasets.compute_stats import aggregate_stats, compute_stats +from lerobot.common.datasets.image_writer import AsyncImageWriter, write_image from lerobot.common.datasets.utils import ( - calculate_episode_data_index, - load_episode_data_index, - load_hf_dataset, + DEFAULT_FEATURES, + DEFAULT_IMAGE_PATH, + EPISODES_PATH, + INFO_PATH, + STATS_PATH, + TASKS_PATH, + append_jsonlines, + check_delta_timestamps, + check_timestamps_sync, + check_version_compatibility, + create_branch, + create_empty_dataset_info, + create_lerobot_dataset_card, + get_delta_indices, + get_episode_data_index, + get_features_from_robot, + get_hf_features_from_features, + get_hub_safe_version, + hf_transform_to_torch, + load_episodes, load_info, - load_previous_and_future_frames, load_stats, - load_videos, - reset_episode_index, + load_tasks, + serialize_dict, + write_json, + write_parquet, ) -from lerobot.common.datasets.video_utils import VideoFrame, load_from_videos +from lerobot.common.datasets.video_utils import ( + VideoFrame, + decode_video_frames_torchvision, + encode_video_frames, + get_video_info, +) +from lerobot.common.robot_devices.robots.utils import Robot # For maintainers, see lerobot/common/datasets/push_dataset_to_hub/CODEBASE_VERSION.md -CODEBASE_VERSION = "v1.6" -DATA_DIR = Path(os.environ["DATA_DIR"]) if "DATA_DIR" in os.environ else None +CODEBASE_VERSION = "v2.0" +LEROBOT_HOME = Path(os.getenv("LEROBOT_HOME", "~/.cache/huggingface/lerobot")).expanduser() + + +class LeRobotDatasetMetadata: + def __init__( + self, + repo_id: str, + root: str | Path | None = None, + local_files_only: bool = False, + ): + self.repo_id = repo_id + self.root = Path(root) if root is not None else LEROBOT_HOME / repo_id + self.local_files_only = local_files_only + + # Load metadata + (self.root / "meta").mkdir(exist_ok=True, parents=True) + self.pull_from_repo(allow_patterns="meta/") + self.info = load_info(self.root) + self.stats = load_stats(self.root) + self.tasks = load_tasks(self.root) + self.episodes = load_episodes(self.root) + + def pull_from_repo( + self, + allow_patterns: list[str] | str | None = None, + ignore_patterns: list[str] | str | None = None, + ) -> None: + snapshot_download( + self.repo_id, + repo_type="dataset", + revision=self._hub_version, + local_dir=self.root, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + local_files_only=self.local_files_only, + ) + + @cached_property + def _hub_version(self) -> str | None: + return None if self.local_files_only else get_hub_safe_version(self.repo_id, CODEBASE_VERSION) + + @property + def _version(self) -> str: + """Codebase version used to create this dataset.""" + return self.info["codebase_version"] + + def get_data_file_path(self, ep_index: int) -> Path: + ep_chunk = self.get_episode_chunk(ep_index) + fpath = self.data_path.format(episode_chunk=ep_chunk, episode_index=ep_index) + return Path(fpath) + + def get_video_file_path(self, ep_index: int, vid_key: str) -> Path: + ep_chunk = self.get_episode_chunk(ep_index) + fpath = self.video_path.format(episode_chunk=ep_chunk, video_key=vid_key, episode_index=ep_index) + return 
Path(fpath) + + def get_episode_chunk(self, ep_index: int) -> int: + return ep_index // self.chunks_size + + @property + def data_path(self) -> str: + """Formattable string for the parquet files.""" + return self.info["data_path"] + + @property + def video_path(self) -> str | None: + """Formattable string for the video files.""" + return self.info["video_path"] + + @property + def robot_type(self) -> str | None: + """Robot type used in recording this dataset.""" + return self.info["robot_type"] + + @property + def fps(self) -> int: + """Frames per second used during data collection.""" + return self.info["fps"] + + @property + def features(self) -> dict[str, dict]: + """All features contained in the dataset.""" + return self.info["features"] + + @property + def image_keys(self) -> list[str]: + """Keys to access visual modalities stored as images.""" + return [key for key, ft in self.features.items() if ft["dtype"] == "image"] + + @property + def video_keys(self) -> list[str]: + """Keys to access visual modalities stored as videos.""" + return [key for key, ft in self.features.items() if ft["dtype"] == "video"] + + @property + def camera_keys(self) -> list[str]: + """Keys to access visual modalities (regardless of their storage method).""" + return [key for key, ft in self.features.items() if ft["dtype"] in ["video", "image"]] + + @property + def names(self) -> dict[str, list | dict]: + """Names of the various dimensions of vector modalities.""" + return {key: ft["names"] for key, ft in self.features.items()} + + @property + def shapes(self) -> dict: + """Shapes for the different features.""" + return {key: tuple(ft["shape"]) for key, ft in self.features.items()} + + @property + def total_episodes(self) -> int: + """Total number of episodes available.""" + return self.info["total_episodes"] + + @property + def total_frames(self) -> int: + """Total number of frames saved in this dataset.""" + return self.info["total_frames"] + + @property + def total_tasks(self) -> int: + """Total number of different tasks performed in this dataset.""" + return self.info["total_tasks"] + + @property + def total_chunks(self) -> int: + """Total number of chunks (groups of episodes).""" + return self.info["total_chunks"] + + @property + def chunks_size(self) -> int: + """Max number of episodes per chunk.""" + return self.info["chunks_size"] + + @property + def task_to_task_index(self) -> dict: + return {task: task_idx for task_idx, task in self.tasks.items()} + + def get_task_index(self, task: str) -> int: + """ + Given a task in natural language, returns its task_index if the task already exists in the dataset, + otherwise creates a new task_index. 
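The task bookkeeping around this method is a plain dict inversion; a standalone sketch (illustrative only, with made-up tasks) of the behaviour `get_task_index` implements:

```python
# tasks maps task_index -> natural-language task, as stored in meta/tasks.jsonl.
tasks = {0: "Pick up the cube", 1: "Open the drawer"}
total_tasks = len(tasks)

# Inverse mapping, as in the task_to_task_index property above.
task_to_task_index = {task: task_idx for task_idx, task in tasks.items()}

def get_task_index(task: str) -> int:
    # Known tasks keep their index; an unseen task gets the next free index.
    task_index = task_to_task_index.get(task, None)
    return task_index if task_index is not None else total_tasks

print(get_task_index("Open the drawer"))      # 1
print(get_task_index("Stack the red block"))  # 2 (new task)
```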
+ """ + task_index = self.task_to_task_index.get(task, None) + return task_index if task_index is not None else self.total_tasks + + def save_episode(self, episode_index: int, episode_length: int, task: str, task_index: int) -> None: + self.info["total_episodes"] += 1 + self.info["total_frames"] += episode_length + + if task_index not in self.tasks: + self.info["total_tasks"] += 1 + self.tasks[task_index] = task + task_dict = { + "task_index": task_index, + "task": task, + } + append_jsonlines(task_dict, self.root / TASKS_PATH) + + chunk = self.get_episode_chunk(episode_index) + if chunk >= self.total_chunks: + self.info["total_chunks"] += 1 + + self.info["splits"] = {"train": f"0:{self.info['total_episodes']}"} + self.info["total_videos"] += len(self.video_keys) + write_json(self.info, self.root / INFO_PATH) + + episode_dict = { + "episode_index": episode_index, + "tasks": [task], + "length": episode_length, + } + self.episodes.append(episode_dict) + append_jsonlines(episode_dict, self.root / EPISODES_PATH) + + # TODO(aliberts): refactor stats in save_episodes + # image_sampling = int(self.fps / 2) # sample 2 img/s for the stats + # ep_stats = compute_episode_stats(episode_buffer, self.features, episode_length, image_sampling=image_sampling) + # ep_stats = serialize_dict(ep_stats) + # append_jsonlines(ep_stats, self.root / STATS_PATH) + + def write_video_info(self) -> None: + """ + Warning: this function writes info from first episode videos, implicitly assuming that all videos have + been encoded the same way. Also, this means it assumes the first episode exists. + """ + for key in self.video_keys: + if not self.features[key].get("info", None): + video_path = self.root / self.get_video_file_path(ep_index=0, vid_key=key) + self.info["features"][key]["info"] = get_video_info(video_path) + + write_json(self.info, self.root / INFO_PATH) + + def __repr__(self): + feature_keys = list(self.features) + return ( + f"{self.__class__.__name__}({{\n" + f" Repository ID: '{self.repo_id}',\n" + f" Total episodes: '{self.total_episodes}',\n" + f" Total frames: '{self.total_frames}',\n" + f" Features: '{feature_keys}',\n" + "})',\n" + ) + + @classmethod + def create( + cls, + repo_id: str, + fps: int, + root: str | Path | None = None, + robot: Robot | None = None, + robot_type: str | None = None, + features: dict | None = None, + use_videos: bool = True, + ) -> "LeRobotDatasetMetadata": + """Creates metadata for a LeRobotDataset.""" + obj = cls.__new__(cls) + obj.repo_id = repo_id + obj.root = Path(root) if root is not None else LEROBOT_HOME / repo_id + + obj.root.mkdir(parents=True, exist_ok=False) + + if robot is not None: + features = get_features_from_robot(robot, use_videos) + robot_type = robot.robot_type + if not all(cam.fps == fps for cam in robot.cameras.values()): + logging.warning( + f"Some cameras in your {robot.robot_type} robot don't have an fps matching the fps of your dataset." + "In this case, frames from lower fps cameras will be repeated to fill in the blanks." + ) + elif features is None: + raise ValueError( + "Dataset features must either come from a Robot or explicitly passed upon creation." 
+ ) + else: + # TODO(aliberts, rcadene): implement sanity check for features + features = {**features, **DEFAULT_FEATURES} + + obj.tasks, obj.stats, obj.episodes = {}, {}, [] + obj.info = create_empty_dataset_info(CODEBASE_VERSION, fps, robot_type, features, use_videos) + if len(obj.video_keys) > 0 and not use_videos: + raise ValueError() + write_json(obj.info, obj.root / INFO_PATH) + obj.local_files_only = True + return obj class LeRobotDataset(torch.utils.data.Dataset): def __init__( self, repo_id: str, - root: Path | None = DATA_DIR, - split: str = "train", + root: str | Path | None = None, + episodes: list[int] | None = None, image_transforms: Callable | None = None, delta_timestamps: dict[list[float]] | None = None, + tolerance_s: float = 1e-4, + download_videos: bool = True, + local_files_only: bool = False, video_backend: str | None = None, ): + """ + 2 modes are available for instantiating this class, depending on 2 different use cases: + + 1. Your dataset already exists: + - On your local disk in the 'root' folder. This is typically the case when you recorded your + dataset locally and you may or may not have pushed it to the hub yet. Instantiating this class + with 'root' will load your dataset directly from disk. This can happen while you're offline (no + internet connection), in that case, use local_files_only=True. + + - On the Hugging Face Hub at the address https://huggingface.co/datasets/{repo_id} and not on + your local disk in the 'root' folder. Instantiating this class with this 'repo_id' will download + the dataset from that address and load it, pending your dataset is compliant with + codebase_version v2.0. If your dataset has been created before this new format, you will be + prompted to convert it using our conversion script from v1.6 to v2.0, which you can find at + lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py. + + + 2. Your dataset doesn't already exists (either on local disk or on the Hub): you can create an empty + LeRobotDataset with the 'create' classmethod. This can be used for recording a dataset or port an + existing dataset to the LeRobotDataset format. + + + In terms of files, LeRobotDataset encapsulates 3 main things: + - metadata: + - info contains various information about the dataset like shapes, keys, fps etc. + - stats stores the dataset statistics of the different modalities for normalization + - tasks contains the prompts for each task of the dataset, which can be used for + task-conditionned training. + - hf_dataset (from datasets.Dataset), which will read any values from parquet files. + - videos (optional) from which frames are loaded to be synchronous with data from parquet files. + + A typical LeRobotDataset looks like this from its root path: + . + ├── data + │ ├── chunk-000 + │ │ ├── episode_000000.parquet + │ │ ├── episode_000001.parquet + │ │ ├── episode_000002.parquet + │ │ └── ... + │ ├── chunk-001 + │ │ ├── episode_001000.parquet + │ │ ├── episode_001001.parquet + │ │ ├── episode_001002.parquet + │ │ └── ... + │ └── ... + ├── meta + │ ├── episodes.jsonl + │ ├── info.json + │ ├── stats.json + │ └── tasks.jsonl + └── videos + ├── chunk-000 + │ ├── observation.images.laptop + │ │ ├── episode_000000.mp4 + │ │ ├── episode_000001.mp4 + │ │ ├── episode_000002.mp4 + │ │ └── ... + │ ├── observation.images.phone + │ │ ├── episode_000000.mp4 + │ │ ├── episode_000001.mp4 + │ │ ├── episode_000002.mp4 + │ │ └── ... + ├── chunk-001 + └── ... + + Note that this file-based structure is designed to be as versatile as possible. 
The files are split by + episodes which allows a more granular control over which episodes one wants to use and download. The + structure of the dataset is entirely described in the info.json file, which can be easily downloaded + or viewed directly on the hub before downloading any actual data. The type of files used are very + simple and do not need complex tools to be read, it only uses .parquet, .json and .mp4 files (and .md + for the README). + + Args: + repo_id (str): This is the repo id that will be used to fetch the dataset. Locally, the dataset + will be stored under root/repo_id. + root (Path | None, optional): Local directory to use for downloading/writing files. You can also + set the LEROBOT_HOME environment variable to point to a different location. Defaults to + '~/.cache/huggingface/lerobot'. + episodes (list[int] | None, optional): If specified, this will only load episodes specified by + their episode_index in this list. Defaults to None. + image_transforms (Callable | None, optional): You can pass standard v2 image transforms from + torchvision.transforms.v2 here which will be applied to visual modalities (whether they come + from videos or images). Defaults to None. + delta_timestamps (dict[list[float]] | None, optional): _description_. Defaults to None. + tolerance_s (float, optional): Tolerance in seconds used to ensure data timestamps are actually in + sync with the fps value. It is used at the init of the dataset to make sure that each + timestamps is separated to the next by 1/fps +/- tolerance_s. This also applies to frames + decoded from video files. It is also used to check that `delta_timestamps` (when provided) are + multiples of 1/fps. Defaults to 1e-4. + download_videos (bool, optional): Flag to download the videos. Note that when set to True but the + video files are already present on local disk, they won't be downloaded again. Defaults to + True. + local_files_only (bool, optional): Flag to use local files only. If True, no requests to the hub + will be made. Defaults to False. + video_backend (str | None, optional): Video backend to use for decoding videos. There is currently + a single option which is the pyav decoder used by Torchvision. Defaults to pyav. 
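As a usage illustration of the constructor documented above (an editorial sketch; the repo id, episode selection and delta values are examples, not prescriptions):

```python
import torch

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

# Download and load only two episodes, with a short temporal context on the camera frames.
delta_timestamps = {"observation.image": [-0.1, 0.0]}
dataset = LeRobotDataset(
    "lerobot/pusht",
    episodes=[0, 1],
    delta_timestamps=delta_timestamps,
)
print(dataset.num_episodes, dataset.num_frames, dataset.fps)

# The dataset is a regular torch Dataset, so standard DataLoader usage applies.
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)
batch = next(iter(dataloader))
print(batch["action"].shape)  # (32, action_dim); observation.image gains a leading time dim
                              # of size 2 because two delta timestamps were requested
```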
+ """ super().__init__() self.repo_id = repo_id - self.root = root - self.split = split + self.root = Path(root) if root else LEROBOT_HOME / repo_id self.image_transforms = image_transforms self.delta_timestamps = delta_timestamps - # load data from hub or locally when root is provided + self.episodes = episodes + self.tolerance_s = tolerance_s + self.video_backend = video_backend if video_backend else "pyav" + self.delta_indices = None + self.local_files_only = local_files_only + + # Unused attributes + self.image_writer = None + self.episode_buffer = None + + self.root.mkdir(exist_ok=True, parents=True) + + # Load metadata + self.meta = LeRobotDatasetMetadata(self.repo_id, self.root, self.local_files_only) + + # Check version + check_version_compatibility(self.repo_id, self.meta._version, CODEBASE_VERSION) + + # Load actual data + self.download_episodes(download_videos) + self.hf_dataset = self.load_hf_dataset() + self.episode_data_index = get_episode_data_index(self.meta.episodes, self.episodes) + + # Check timestamps + check_timestamps_sync(self.hf_dataset, self.episode_data_index, self.fps, self.tolerance_s) + + # Setup delta_indices + if self.delta_timestamps is not None: + check_delta_timestamps(self.delta_timestamps, self.fps, self.tolerance_s) + self.delta_indices = get_delta_indices(self.delta_timestamps, self.fps) + + # Available stats implies all videos have been encoded and dataset is iterable + self.consolidated = self.meta.stats is not None + + def push_to_hub( + self, + tags: list | None = None, + license: str | None = "apache-2.0", + push_videos: bool = True, + private: bool = False, + **card_kwargs, + ) -> None: + if not self.consolidated: + logging.warning( + "You are trying to upload to the hub a LeRobotDataset that has not been consolidated yet. " + "Consolidating first." + ) + self.consolidate() + + ignore_patterns = ["images/"] + if not push_videos: + ignore_patterns.append("videos/") + + create_repo( + repo_id=self.repo_id, + private=private, + repo_type="dataset", + exist_ok=True, + ) + + upload_folder( + repo_id=self.repo_id, + folder_path=self.root, + repo_type="dataset", + ignore_patterns=ignore_patterns, + ) + card = create_lerobot_dataset_card( + tags=tags, dataset_info=self.meta.info, license=license, **card_kwargs + ) + card.push_to_hub(repo_id=self.repo_id, repo_type="dataset") + create_branch(repo_id=self.repo_id, branch=CODEBASE_VERSION, repo_type="dataset") + + def pull_from_repo( + self, + allow_patterns: list[str] | str | None = None, + ignore_patterns: list[str] | str | None = None, + ) -> None: + snapshot_download( + self.repo_id, + repo_type="dataset", + revision=self.meta._hub_version, + local_dir=self.root, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + local_files_only=self.local_files_only, + ) + + def download_episodes(self, download_videos: bool = True) -> None: + """Downloads the dataset from the given 'repo_id' at the provided version. If 'episodes' is given, this + will only download those episodes (selected by their episode_index). If 'episodes' is None, the whole + dataset will be downloaded. Thanks to the behavior of snapshot_download, if the files are already present + in 'local_dir', they won't be downloaded again. 
+ """ # TODO(rcadene, aliberts): implement faster transfer # https://huggingface.co/docs/huggingface_hub/en/guides/download#faster-downloads - self.hf_dataset = load_hf_dataset(repo_id, CODEBASE_VERSION, root, split) - if split == "train": - self.episode_data_index = load_episode_data_index(repo_id, CODEBASE_VERSION, root) + files = None + ignore_patterns = None if download_videos else "videos/" + if self.episodes is not None: + files = [str(self.meta.get_data_file_path(ep_idx)) for ep_idx in self.episodes] + if len(self.meta.video_keys) > 0 and download_videos: + video_files = [ + str(self.meta.get_video_file_path(ep_idx, vid_key)) + for vid_key in self.meta.video_keys + for ep_idx in self.episodes + ] + files += video_files + + self.pull_from_repo(allow_patterns=files, ignore_patterns=ignore_patterns) + + def load_hf_dataset(self) -> datasets.Dataset: + """hf_dataset contains all the observations, states, actions, rewards, etc.""" + if self.episodes is None: + path = str(self.root / "data") + hf_dataset = load_dataset("parquet", data_dir=path, split="train") else: - self.episode_data_index = calculate_episode_data_index(self.hf_dataset) - self.hf_dataset = reset_episode_index(self.hf_dataset) - self.stats = load_stats(repo_id, CODEBASE_VERSION, root) - self.info = load_info(repo_id, CODEBASE_VERSION, root) - if self.video: - self.videos_dir = load_videos(repo_id, CODEBASE_VERSION, root) - self.video_backend = video_backend if video_backend is not None else "pyav" + files = [str(self.root / self.meta.get_data_file_path(ep_idx)) for ep_idx in self.episodes] + hf_dataset = load_dataset("parquet", data_files=files, split="train") + + # TODO(aliberts): hf_dataset.set_format("torch") + hf_dataset.set_transform(hf_transform_to_torch) + + return hf_dataset @property def fps(self) -> int: """Frames per second used during data collection.""" - return self.info["fps"] + return self.meta.fps @property - def video(self) -> bool: - """Returns True if this dataset loads video frames from mp4 files. - Returns False if it only loads images from png files. - """ - return self.info.get("video", False) + def num_frames(self) -> int: + """Number of frames in selected episodes.""" + return len(self.hf_dataset) if self.hf_dataset is not None else self.meta.total_frames @property - def features(self) -> datasets.Features: - return self.hf_dataset.features + def num_episodes(self) -> int: + """Number of episodes selected.""" + return len(self.episodes) if self.episodes is not None else self.meta.total_episodes @property - def camera_keys(self) -> list[str]: - """Keys to access image and video stream from cameras.""" - keys = [] - for key, feats in self.hf_dataset.features.items(): - if isinstance(feats, (datasets.Image, VideoFrame)): - keys.append(key) - return keys + def features(self) -> dict[str, dict]: + return self.meta.features @property - def video_frame_keys(self) -> list[str]: - """Keys to access video frames that requires to be decoded into images. + def hf_features(self) -> datasets.Features: + """Features of the hf_dataset.""" + if self.hf_dataset is not None: + return self.hf_dataset.features + else: + return get_hf_features_from_features(self.features) - Note: It is empty if the dataset contains images only, - or equal to `self.cameras` if the dataset contains videos only, - or can even be a subset of `self.cameras` in a case of a mixed image/video dataset. 
- """ - video_frame_keys = [] - for key, feats in self.hf_dataset.features.items(): - if isinstance(feats, VideoFrame): - video_frame_keys.append(key) - return video_frame_keys + def _get_query_indices(self, idx: int, ep_idx: int) -> tuple[dict[str, list[int | bool]]]: + ep_start = self.episode_data_index["from"][ep_idx] + ep_end = self.episode_data_index["to"][ep_idx] + query_indices = { + key: [max(ep_start.item(), min(ep_end.item() - 1, idx + delta)) for delta in delta_idx] + for key, delta_idx in self.delta_indices.items() + } + padding = { # Pad values outside of current episode range + f"{key}_is_pad": torch.BoolTensor( + [(idx + delta < ep_start.item()) | (idx + delta >= ep_end.item()) for delta in delta_idx] + ) + for key, delta_idx in self.delta_indices.items() + } + return query_indices, padding - @property - def num_samples(self) -> int: - """Number of samples/frames.""" - return len(self.hf_dataset) + def _get_query_timestamps( + self, + current_ts: float, + query_indices: dict[str, list[int]] | None = None, + ) -> dict[str, list[float]]: + query_timestamps = {} + for key in self.meta.video_keys: + if query_indices is not None and key in query_indices: + timestamps = self.hf_dataset.select(query_indices[key])["timestamp"] + query_timestamps[key] = torch.stack(timestamps).tolist() + else: + query_timestamps[key] = [current_ts] - @property - def num_episodes(self) -> int: - """Number of episodes.""" - return len(self.hf_dataset.unique("episode_index")) + return query_timestamps - @property - def tolerance_s(self) -> float: - """Tolerance in seconds used to discard loaded frames when their timestamps - are not close enough from the requested frames. It is only used when `delta_timestamps` - is provided or when loading video frames from mp4 files. + def _query_hf_dataset(self, query_indices: dict[str, list[int]]) -> dict: + return { + key: torch.stack(self.hf_dataset.select(q_idx)[key]) + for key, q_idx in query_indices.items() + if key not in self.meta.video_keys + } + + def _query_videos(self, query_timestamps: dict[str, list[float]], ep_idx: int) -> dict: + """Note: When using data workers (e.g. DataLoader with num_workers>0), do not call this function + in the main process (e.g. by using a second Dataloader with num_workers=0). It will result in a + Segmentation Fault. This probably happens because a memory reference to the video loader is created in + the main process and a subprocess fails to access it. 
""" - # 1e-4 to account for possible numerical error - return 1 / self.fps - 1e-4 + item = {} + for vid_key, query_ts in query_timestamps.items(): + video_path = self.root / self.meta.get_video_file_path(ep_idx, vid_key) + frames = decode_video_frames_torchvision( + video_path, query_ts, self.tolerance_s, self.video_backend + ) + item[vid_key] = frames.squeeze(0) + + return item + + def _add_padding_keys(self, item: dict, padding: dict[str, list[bool]]) -> dict: + for key, val in padding.items(): + item[key] = torch.BoolTensor(val) + return item def __len__(self): - return self.num_samples + return self.num_frames - def __getitem__(self, idx): + def __getitem__(self, idx) -> dict: item = self.hf_dataset[idx] + ep_idx = item["episode_index"].item() - if self.delta_timestamps is not None: - item = load_previous_and_future_frames( - item, - self.hf_dataset, - self.episode_data_index, - self.delta_timestamps, - self.tolerance_s, - ) + query_indices = None + if self.delta_indices is not None: + current_ep_idx = self.episodes.index(ep_idx) if self.episodes is not None else ep_idx + query_indices, padding = self._get_query_indices(idx, current_ep_idx) + query_result = self._query_hf_dataset(query_indices) + item = {**item, **padding} + for key, val in query_result.items(): + item[key] = val - if self.video: - item = load_from_videos( - item, - self.video_frame_keys, - self.videos_dir, - self.tolerance_s, - self.video_backend, - ) + if len(self.meta.video_keys) > 0: + current_ts = item["timestamp"].item() + query_timestamps = self._get_query_timestamps(current_ts, query_indices) + video_frames = self._query_videos(query_timestamps, ep_idx) + item = {**video_frames, **item} if self.image_transforms is not None: - for cam in self.camera_keys: + image_keys = self.meta.camera_keys + for cam in image_keys: item[cam] = self.image_transforms(item[cam]) return item def __repr__(self): + feature_keys = list(self.features) return ( - f"{self.__class__.__name__}(\n" - f" Repository ID: '{self.repo_id}',\n" - f" Split: '{self.split}',\n" - f" Number of Samples: {self.num_samples},\n" - f" Number of Episodes: {self.num_episodes},\n" - f" Type: {'video (.mp4)' if self.video else 'image (.png)'},\n" - f" Recorded Frames per Second: {self.fps},\n" - f" Camera Keys: {self.camera_keys},\n" - f" Video Frame Keys: {self.video_frame_keys if self.video else 'N/A'},\n" - f" Transformations: {self.image_transforms},\n" - f" Codebase Version: {self.info.get('codebase_version', '< v1.6')},\n" - f")" + f"{self.__class__.__name__}({{\n" + f" Repository ID: '{self.repo_id}',\n" + f" Number of selected episodes: '{self.num_episodes}',\n" + f" Number of selected samples: '{self.num_frames}',\n" + f" Features: '{feature_keys}',\n" + "})',\n" + ) + + def create_episode_buffer(self, episode_index: int | None = None) -> dict: + current_ep_idx = self.meta.total_episodes if episode_index is None else episode_index + return { + "size": 0, + **{key: current_ep_idx if key == "episode_index" else [] for key in self.features}, + } + + def _get_image_file_path(self, episode_index: int, image_key: str, frame_index: int) -> Path: + fpath = DEFAULT_IMAGE_PATH.format( + image_key=image_key, episode_index=episode_index, frame_index=frame_index ) + return self.root / fpath + + def _save_image(self, image: torch.Tensor | np.ndarray | PIL.Image.Image, fpath: Path) -> None: + if self.image_writer is None: + if isinstance(image, torch.Tensor): + image = image.cpu().numpy() + write_image(image, fpath) + else: + 
self.image_writer.save_image(image=image, fpath=fpath) + + def add_frame(self, frame: dict) -> None: + """ + This function only adds the frame to the episode_buffer. Apart from images — which are written in a + temporary directory — nothing is written to disk. To save those frames, the 'save_episode()' method + then needs to be called. + """ + # TODO(aliberts, rcadene): Add sanity check for the input, check it's numpy or torch, + # check the dtype and shape matches, etc. + + if self.episode_buffer is None: + self.episode_buffer = self.create_episode_buffer() + + frame_index = self.episode_buffer["size"] + timestamp = frame.pop("timestamp") if "timestamp" in frame else frame_index / self.fps + self.episode_buffer["frame_index"].append(frame_index) + self.episode_buffer["timestamp"].append(timestamp) + + for key in frame: + if key not in self.features: + raise ValueError(key) + + if self.features[key]["dtype"] not in ["image", "video"]: + item = frame[key].numpy() if isinstance(frame[key], torch.Tensor) else frame[key] + self.episode_buffer[key].append(item) + elif self.features[key]["dtype"] in ["image", "video"]: + img_path = self._get_image_file_path( + episode_index=self.episode_buffer["episode_index"], image_key=key, frame_index=frame_index + ) + if frame_index == 0: + img_path.parent.mkdir(parents=True, exist_ok=True) + self._save_image(frame[key], img_path) + self.episode_buffer[key].append(str(img_path)) + + self.episode_buffer["size"] += 1 + + def save_episode(self, task: str, encode_videos: bool = True, episode_data: dict | None = None) -> None: + """ + This will save to disk the current episode in self.episode_buffer. Note that since it affects files on + disk, it sets self.consolidated to False to ensure proper consolidation later on before uploading to + the hub. + + Use 'encode_videos' if you want to encode videos during the saving of this episode. Otherwise, + you can do it later with dataset.consolidate(). This is to give more flexibility on when to spend + time for video encoding. + """ + if not episode_data: + episode_buffer = self.episode_buffer + + episode_length = episode_buffer.pop("size") + episode_index = episode_buffer["episode_index"] + if episode_index != self.meta.total_episodes: + # TODO(aliberts): Add option to use existing episode_index + raise NotImplementedError( + "You might have manually provided the episode_buffer with an episode_index that doesn't " + "match the total number of episodes in the dataset. This is not supported for now." + ) + + if episode_length == 0: + raise ValueError( + "You must add one or several frames with `add_frame` before calling `add_episode`." 
+ ) + + task_index = self.meta.get_task_index(task) + + if not set(episode_buffer.keys()) == set(self.features): + raise ValueError() + + for key, ft in self.features.items(): + if key == "index": + episode_buffer[key] = np.arange( + self.meta.total_frames, self.meta.total_frames + episode_length + ) + elif key == "episode_index": + episode_buffer[key] = np.full((episode_length,), episode_index) + elif key == "task_index": + episode_buffer[key] = np.full((episode_length,), task_index) + elif ft["dtype"] in ["image", "video"]: + continue + elif len(ft["shape"]) == 1 and ft["shape"][0] == 1: + episode_buffer[key] = np.array(episode_buffer[key], dtype=ft["dtype"]) + elif len(ft["shape"]) == 1 and ft["shape"][0] > 1: + episode_buffer[key] = np.stack(episode_buffer[key]) + else: + raise ValueError(key) + + self._wait_image_writer() + self._save_episode_table(episode_buffer, episode_index) + + self.meta.save_episode(episode_index, episode_length, task, task_index) + + if encode_videos and len(self.meta.video_keys) > 0: + video_paths = self.encode_episode_videos(episode_index) + for key in self.meta.video_keys: + episode_buffer[key] = video_paths[key] + + if not episode_data: # Reset the buffer + self.episode_buffer = self.create_episode_buffer() + + self.consolidated = False + + def _save_episode_table(self, episode_buffer: dict, episode_index: int) -> None: + episode_dict = {key: episode_buffer[key] for key in self.hf_features} + ep_dataset = datasets.Dataset.from_dict(episode_dict, features=self.hf_features, split="train") + ep_data_path = self.root / self.meta.get_data_file_path(ep_index=episode_index) + ep_data_path.parent.mkdir(parents=True, exist_ok=True) + write_parquet(ep_dataset, ep_data_path) + + def clear_episode_buffer(self) -> None: + episode_index = self.episode_buffer["episode_index"] + if self.image_writer is not None: + for cam_key in self.meta.camera_keys: + img_dir = self._get_image_file_path( + episode_index=episode_index, image_key=cam_key, frame_index=0 + ).parent + if img_dir.is_dir(): + shutil.rmtree(img_dir) + + # Reset the buffer + self.episode_buffer = self.create_episode_buffer() + + def start_image_writer(self, num_processes: int = 0, num_threads: int = 4) -> None: + if isinstance(self.image_writer, AsyncImageWriter): + logging.warning( + "You are starting a new AsyncImageWriter that is replacing an already existing one in the dataset." + ) + + self.image_writer = AsyncImageWriter( + num_processes=num_processes, + num_threads=num_threads, + ) + + def stop_image_writer(self) -> None: + """ + Whenever wrapping this dataset inside a parallelized DataLoader, this needs to be called first to + remove the image_write in order for the LeRobotDataset object to be pickleable and parallelized. + """ + if self.image_writer is not None: + self.image_writer.stop() + self.image_writer = None + + def _wait_image_writer(self) -> None: + """Wait for asynchronous image writer to finish.""" + if self.image_writer is not None: + self.image_writer.wait_until_done() + + def encode_videos(self) -> None: + """ + Use ffmpeg to convert frames stored as png into mp4 videos. + Note: `encode_video_frames` is a blocking call. Making it asynchronous shouldn't speedup encoding, + since video encoding with ffmpeg is already using multithreading. + """ + for ep_idx in range(self.meta.total_episodes): + self.encode_episode_videos(ep_idx) + + def encode_episode_videos(self, episode_index: int) -> dict: + """ + Use ffmpeg to convert frames stored as png into mp4 videos. 
+ Note: `encode_video_frames` is a blocking call. Making it asynchronous shouldn't speedup encoding, + since video encoding with ffmpeg is already using multithreading. + """ + video_paths = {} + for key in self.meta.video_keys: + video_path = self.root / self.meta.get_video_file_path(episode_index, key) + video_paths[key] = str(video_path) + if video_path.is_file(): + # Skip if video is already encoded. Could be the case when resuming data recording. + continue + img_dir = self._get_image_file_path( + episode_index=episode_index, image_key=key, frame_index=0 + ).parent + encode_video_frames(img_dir, video_path, self.fps, overwrite=True) + + return video_paths + + def consolidate(self, run_compute_stats: bool = True, keep_image_files: bool = False) -> None: + self.hf_dataset = self.load_hf_dataset() + self.episode_data_index = get_episode_data_index(self.meta.episodes, self.episodes) + check_timestamps_sync(self.hf_dataset, self.episode_data_index, self.fps, self.tolerance_s) + + if len(self.meta.video_keys) > 0: + self.encode_videos() + self.meta.write_video_info() + + if not keep_image_files: + img_dir = self.root / "images" + if img_dir.is_dir(): + shutil.rmtree(self.root / "images") + + video_files = list(self.root.rglob("*.mp4")) + assert len(video_files) == self.num_episodes * len(self.meta.video_keys) + + parquet_files = list(self.root.rglob("*.parquet")) + assert len(parquet_files) == self.num_episodes + + if run_compute_stats: + self.stop_image_writer() + # TODO(aliberts): refactor stats in save_episodes + self.meta.stats = compute_stats(self) + serialized_stats = serialize_dict(self.meta.stats) + write_json(serialized_stats, self.root / STATS_PATH) + self.consolidated = True + else: + logging.warning( + "Skipping computation of the dataset statistics, dataset is not fully consolidated." + ) @classmethod - def from_preloaded( + def create( cls, - repo_id: str = "from_preloaded", - root: Path | None = None, - split: str = "train", - transform: callable = None, - delta_timestamps: dict[list[float]] | None = None, - # additional preloaded attributes - hf_dataset=None, - episode_data_index=None, - stats=None, - info=None, - videos_dir=None, - video_backend=None, + repo_id: str, + fps: int, + root: str | Path | None = None, + robot: Robot | None = None, + robot_type: str | None = None, + features: dict | None = None, + use_videos: bool = True, + tolerance_s: float = 1e-4, + image_writer_processes: int = 0, + image_writer_threads: int = 0, + video_backend: str | None = None, ) -> "LeRobotDataset": - """Create a LeRobot Dataset from existing data and attributes instead of loading from the filesystem. + """Create a LeRobot Dataset from scratch in order to record data.""" + obj = cls.__new__(cls) + obj.meta = LeRobotDatasetMetadata.create( + repo_id=repo_id, + fps=fps, + root=root, + robot=robot, + robot_type=robot_type, + features=features, + use_videos=use_videos, + ) + obj.repo_id = obj.meta.repo_id + obj.root = obj.meta.root + obj.local_files_only = obj.meta.local_files_only + obj.tolerance_s = tolerance_s + obj.image_writer = None - It is especially useful when converting raw data into LeRobotDataset before saving the dataset - on the filesystem or uploading to the hub. + if image_writer_processes or image_writer_threads: + obj.start_image_writer(image_writer_processes, image_writer_threads) - Note: Meta-data attributes like `repo_id`, `version`, `root`, etc are optional and potentially - meaningless depending on the downstream usage of the return dataset. 
- """ - # create an empty object of type LeRobotDataset - obj = cls.__new__(cls) - obj.repo_id = repo_id - obj.root = root - obj.split = split - obj.image_transforms = transform - obj.delta_timestamps = delta_timestamps - obj.hf_dataset = hf_dataset - obj.episode_data_index = episode_data_index - obj.stats = stats - obj.info = info if info is not None else {} - obj.videos_dir = videos_dir + # TODO(aliberts, rcadene, alexander-soare): Merge this with OnlineBuffer/DataBuffer + obj.episode_buffer = obj.create_episode_buffer() + + # This bool indicates that the current LeRobotDataset instance is in sync with the files on disk. It + # is used to know when certain operations are need (for instance, computing dataset statistics). In + # order to be able to push the dataset to the hub, it needs to be consolidated first by calling + # self.consolidate(). + obj.consolidated = True + + obj.episodes = None + obj.hf_dataset = None + obj.image_transforms = None + obj.delta_timestamps = None + obj.delta_indices = None + obj.episode_data_index = None obj.video_backend = video_backend if video_backend is not None else "pyav" return obj @@ -225,57 +969,56 @@ class MultiLeRobotDataset(torch.utils.data.Dataset): def __init__( self, repo_ids: list[str], - root: Path | None = DATA_DIR, - split: str = "train", + root: str | Path | None = None, + episodes: dict | None = None, image_transforms: Callable | None = None, delta_timestamps: dict[list[float]] | None = None, + tolerances_s: dict | None = None, + download_videos: bool = True, + local_files_only: bool = False, video_backend: str | None = None, ): super().__init__() self.repo_ids = repo_ids + self.root = Path(root) if root else LEROBOT_HOME + self.tolerances_s = tolerances_s if tolerances_s else {repo_id: 1e-4 for repo_id in repo_ids} # Construct the underlying datasets passing everything but `transform` and `delta_timestamps` which # are handled by this class. self._datasets = [ LeRobotDataset( repo_id, - root=root, - split=split, - delta_timestamps=delta_timestamps, + root=self.root / repo_id, + episodes=episodes[repo_id] if episodes else None, image_transforms=image_transforms, + delta_timestamps=delta_timestamps, + tolerance_s=self.tolerances_s[repo_id], + download_videos=download_videos, + local_files_only=local_files_only, video_backend=video_backend, ) for repo_id in repo_ids ] - # Check that some properties are consistent across datasets. Note: We may relax some of these - # consistency requirements in future iterations of this class. - for repo_id, dataset in zip(self.repo_ids, self._datasets, strict=True): - if dataset.info != self._datasets[0].info: - raise ValueError( - f"Detected a mismatch in dataset info between {self.repo_ids[0]} and {repo_id}. This is " - "not yet supported." - ) + # Disable any data keys that are not common across all of the datasets. Note: we may relax this # restriction in future iterations of this class. For now, this is necessary at least for being able # to use PyTorch's default DataLoader collate function. 
- self.disabled_data_keys = set() - intersection_data_keys = set(self._datasets[0].hf_dataset.features) - for dataset in self._datasets: - intersection_data_keys.intersection_update(dataset.hf_dataset.features) - if len(intersection_data_keys) == 0: + self.disabled_features = set() + intersection_features = set(self._datasets[0].features) + for ds in self._datasets: + intersection_features.intersection_update(ds.features) + if len(intersection_features) == 0: raise RuntimeError( - "Multiple datasets were provided but they had no keys common to all of them. The " - "multi-dataset functionality currently only keeps common keys." + "Multiple datasets were provided but they had no keys common to all of them. " + "The multi-dataset functionality currently only keeps common keys." ) - for repo_id, dataset in zip(self.repo_ids, self._datasets, strict=True): - extra_keys = set(dataset.hf_dataset.features).difference(intersection_data_keys) + for repo_id, ds in zip(self.repo_ids, self._datasets, strict=True): + extra_keys = set(ds.features).difference(intersection_features) logging.warning( f"keys {extra_keys} of {repo_id} were disabled as they are not contained in all the " "other datasets." ) - self.disabled_data_keys.update(extra_keys) + self.disabled_features.update(extra_keys) - self.root = root - self.split = split self.image_transforms = image_transforms self.delta_timestamps = delta_timestamps self.stats = aggregate_stats(self._datasets) @@ -299,7 +1042,7 @@ def fps(self) -> int: NOTE: Fow now, this relies on a check in __init__ to make sure all sub-datasets have the same info. """ - return self._datasets[0].info["fps"] + return self._datasets[0].meta.info["fps"] @property def video(self) -> bool: @@ -309,13 +1052,13 @@ def video(self) -> bool: NOTE: Fow now, this relies on a check in __init__ to make sure all sub-datasets have the same info. 
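For reference, a minimal editorial sketch (the repo ids are chosen purely as an example) of instantiating the multi-dataset wrapper from this hunk:

```python
from lerobot.common.datasets.lerobot_dataset import MultiLeRobotDataset

dataset = MultiLeRobotDataset(
    repo_ids=["lerobot/aloha_sim_insertion_human", "lerobot/aloha_sim_transfer_cube_human"],
)

print(dataset.num_frames)         # total frames across both sub-datasets
print(dataset.disabled_features)  # keys dropped because they are not common to every sub-dataset
```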
""" - return self._datasets[0].info.get("video", False) + return self._datasets[0].meta.info.get("video", False) @property def features(self) -> datasets.Features: features = {} for dataset in self._datasets: - features.update({k: v for k, v in dataset.features.items() if k not in self.disabled_data_keys}) + features.update({k: v for k, v in dataset.hf_features.items() if k not in self.disabled_features}) return features @property @@ -342,9 +1085,9 @@ def video_frame_keys(self) -> list[str]: return video_frame_keys @property - def num_samples(self) -> int: + def num_frames(self) -> int: """Number of samples/frames.""" - return sum(d.num_samples for d in self._datasets) + return sum(d.num_frames for d in self._datasets) @property def num_episodes(self) -> int: @@ -361,7 +1104,7 @@ def tolerance_s(self) -> float: return 1 / self.fps - 1e-4 def __len__(self): - return self.num_samples + return self.num_frames def __getitem__(self, idx: int) -> dict[str, torch.Tensor]: if idx >= len(self): @@ -370,8 +1113,8 @@ def __getitem__(self, idx: int) -> dict[str, torch.Tensor]: start_idx = 0 dataset_idx = 0 for dataset in self._datasets: - if idx >= start_idx + dataset.num_samples: - start_idx += dataset.num_samples + if idx >= start_idx + dataset.num_frames: + start_idx += dataset.num_frames dataset_idx += 1 continue break @@ -379,7 +1122,7 @@ def __getitem__(self, idx: int) -> dict[str, torch.Tensor]: raise AssertionError("We expect the loop to break out as long as the index is within bounds.") item = self._datasets[dataset_idx][idx - start_idx] item["dataset_index"] = torch.tensor(dataset_idx) - for data_key in self.disabled_data_keys: + for data_key in self.disabled_features: if data_key in item: del item[data_key] @@ -389,8 +1132,7 @@ def __repr__(self): return ( f"{self.__class__.__name__}(\n" f" Repository IDs: '{self.repo_ids}',\n" - f" Split: '{self.split}',\n" - f" Number of Samples: {self.num_samples},\n" + f" Number of Samples: {self.num_frames},\n" f" Number of Episodes: {self.num_episodes},\n" f" Type: {'video (.mp4)' if self.video else 'image (.png)'},\n" f" Recorded Frames per Second: {self.fps},\n" diff --git a/lerobot/common/datasets/online_buffer.py b/lerobot/common/datasets/online_buffer.py index 6b093cda7..d907e4687 100644 --- a/lerobot/common/datasets/online_buffer.py +++ b/lerobot/common/datasets/online_buffer.py @@ -187,7 +187,7 @@ def add_data(self, data: dict[str, np.ndarray]): assert data[OnlineBuffer.INDEX_KEY][0].item() == 0 # Shift the incoming indices if necessary. 
- if self.num_samples > 0: + if self.num_frames > 0: last_episode_index = self._data[OnlineBuffer.EPISODE_INDEX_KEY][next_index - 1] last_data_index = self._data[OnlineBuffer.INDEX_KEY][next_index - 1] data[OnlineBuffer.EPISODE_INDEX_KEY] += last_episode_index + 1 @@ -227,11 +227,11 @@ def num_episodes(self) -> int: ) @property - def num_samples(self) -> int: + def num_frames(self) -> int: return np.count_nonzero(self._data[OnlineBuffer.OCCUPANCY_MASK_KEY]) def __len__(self): - return self.num_samples + return self.num_frames def _item_to_tensors(self, item: dict) -> dict: item_ = {} diff --git a/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py b/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py index 52c4bba3d..e2973ef81 100644 --- a/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py +++ b/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py @@ -30,12 +30,12 @@ from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.push_dataset_to_hub.utils import ( + calculate_episode_data_index, concatenate_episodes, get_default_encoding, save_images_concurrently, ) from lerobot.common.datasets.utils import ( - calculate_episode_data_index, hf_transform_to_torch, ) from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames diff --git a/lerobot/common/datasets/push_dataset_to_hub/cam_png_format.py b/lerobot/common/datasets/push_dataset_to_hub/cam_png_format.py index be20c92cd..264925766 100644 --- a/lerobot/common/datasets/push_dataset_to_hub/cam_png_format.py +++ b/lerobot/common/datasets/push_dataset_to_hub/cam_png_format.py @@ -24,8 +24,11 @@ from PIL import Image as PILImage from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION -from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes -from lerobot.common.datasets.utils import calculate_episode_data_index, hf_transform_to_torch +from lerobot.common.datasets.push_dataset_to_hub.utils import ( + calculate_episode_data_index, + concatenate_episodes, +) +from lerobot.common.datasets.utils import hf_transform_to_torch from lerobot.common.datasets.video_utils import VideoFrame diff --git a/lerobot/common/datasets/push_dataset_to_hub/dora_parquet_format.py b/lerobot/common/datasets/push_dataset_to_hub/dora_parquet_format.py index 72be130e3..95f9c0071 100644 --- a/lerobot/common/datasets/push_dataset_to_hub/dora_parquet_format.py +++ b/lerobot/common/datasets/push_dataset_to_hub/dora_parquet_format.py @@ -26,8 +26,8 @@ from datasets import Dataset, Features, Image, Sequence, Value from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION +from lerobot.common.datasets.push_dataset_to_hub.utils import calculate_episode_data_index from lerobot.common.datasets.utils import ( - calculate_episode_data_index, hf_transform_to_torch, ) from lerobot.common.datasets.video_utils import VideoFrame diff --git a/lerobot/common/datasets/push_dataset_to_hub/openx_rlds_format.py b/lerobot/common/datasets/push_dataset_to_hub/openx_rlds_format.py index f5744c521..cfe115034 100644 --- a/lerobot/common/datasets/push_dataset_to_hub/openx_rlds_format.py +++ b/lerobot/common/datasets/push_dataset_to_hub/openx_rlds_format.py @@ -42,12 +42,12 @@ from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.push_dataset_to_hub.openx.transforms import OPENX_STANDARDIZATION_TRANSFORMS from lerobot.common.datasets.push_dataset_to_hub.utils import ( + calculate_episode_data_index, 
concatenate_episodes, get_default_encoding, save_images_concurrently, ) from lerobot.common.datasets.utils import ( - calculate_episode_data_index, hf_transform_to_torch, ) from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames diff --git a/lerobot/common/datasets/push_dataset_to_hub/pusht_zarr_format.py b/lerobot/common/datasets/push_dataset_to_hub/pusht_zarr_format.py index 13d6c837e..27b31ba24 100644 --- a/lerobot/common/datasets/push_dataset_to_hub/pusht_zarr_format.py +++ b/lerobot/common/datasets/push_dataset_to_hub/pusht_zarr_format.py @@ -27,12 +27,12 @@ from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.push_dataset_to_hub.utils import ( + calculate_episode_data_index, concatenate_episodes, get_default_encoding, save_images_concurrently, ) from lerobot.common.datasets.utils import ( - calculate_episode_data_index, hf_transform_to_torch, ) from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames diff --git a/lerobot/common/datasets/push_dataset_to_hub/umi_zarr_format.py b/lerobot/common/datasets/push_dataset_to_hub/umi_zarr_format.py index d724cf33e..fec893a7f 100644 --- a/lerobot/common/datasets/push_dataset_to_hub/umi_zarr_format.py +++ b/lerobot/common/datasets/push_dataset_to_hub/umi_zarr_format.py @@ -28,12 +28,12 @@ from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.push_dataset_to_hub._umi_imagecodecs_numcodecs import register_codecs from lerobot.common.datasets.push_dataset_to_hub.utils import ( + calculate_episode_data_index, concatenate_episodes, get_default_encoding, save_images_concurrently, ) from lerobot.common.datasets.utils import ( - calculate_episode_data_index, hf_transform_to_torch, ) from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames diff --git a/lerobot/common/datasets/push_dataset_to_hub/utils.py b/lerobot/common/datasets/push_dataset_to_hub/utils.py index 97b54e45b..ebcf87f77 100644 --- a/lerobot/common/datasets/push_dataset_to_hub/utils.py +++ b/lerobot/common/datasets/push_dataset_to_hub/utils.py @@ -16,7 +16,9 @@ import inspect from concurrent.futures import ThreadPoolExecutor from pathlib import Path +from typing import Dict +import datasets import numpy import PIL import torch @@ -72,3 +74,58 @@ def check_repo_id(repo_id: str) -> None: f"""`repo_id` is expected to contain a community or user id `/` the name of the dataset (e.g. 'lerobot/pusht'), but contains '{repo_id}'.""" ) + + +# TODO(aliberts): remove +def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> Dict[str, torch.Tensor]: + """ + Calculate episode data index for the provided HuggingFace Dataset. Relies on episode_index column of hf_dataset. + + Parameters: + - hf_dataset (datasets.Dataset): A HuggingFace dataset containing the episode index. + + Returns: + - episode_data_index: A dictionary containing the data index for each episode. The dictionary has two keys: + - "from": A tensor containing the starting index of each episode. + - "to": A tensor containing the ending index of each episode. + """ + episode_data_index = {"from": [], "to": []} + + current_episode = None + """ + The episode_index is a list of integers, each representing the episode index of the corresponding example. 
+ For instance, the following is a valid episode_index: + [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2] + + Below, we iterate through the episode_index and populate the episode_data_index dictionary with the starting and + ending index of each episode. For the episode_index above, the episode_data_index dictionary will look like this: + { + "from": [0, 3, 7], + "to": [3, 7, 12] + } + """ + if len(hf_dataset) == 0: + episode_data_index = { + "from": torch.tensor([]), + "to": torch.tensor([]), + } + return episode_data_index + for idx, episode_idx in enumerate(hf_dataset["episode_index"]): + if episode_idx != current_episode: + # We encountered a new episode, so we append its starting location to the "from" list + episode_data_index["from"].append(idx) + # If this is not the first episode, we append the ending location of the previous episode to the "to" list + if current_episode is not None: + episode_data_index["to"].append(idx) + # Let's keep track of the current episode index + current_episode = episode_idx + else: + # We are still in the same episode, so there is nothing for us to do here + pass + # We have reached the end of the dataset, so we append the ending location of the last episode to the "to" list + episode_data_index["to"].append(idx + 1) + + for k in ["from", "to"]: + episode_data_index[k] = torch.tensor(episode_data_index[k]) + + return episode_data_index diff --git a/lerobot/common/datasets/push_dataset_to_hub/xarm_pkl_format.py b/lerobot/common/datasets/push_dataset_to_hub/xarm_pkl_format.py index ad1cb560e..0047e48c3 100644 --- a/lerobot/common/datasets/push_dataset_to_hub/xarm_pkl_format.py +++ b/lerobot/common/datasets/push_dataset_to_hub/xarm_pkl_format.py @@ -27,12 +27,12 @@ from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.push_dataset_to_hub.utils import ( + calculate_episode_data_index, concatenate_episodes, get_default_encoding, save_images_concurrently, ) from lerobot.common.datasets.utils import ( - calculate_episode_data_index, hf_transform_to_torch, ) from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames diff --git a/lerobot/common/datasets/utils.py b/lerobot/common/datasets/utils.py index d6aef15f5..5f088b118 100644 --- a/lerobot/common/datasets/utils.py +++ b/lerobot/common/datasets/utils.py @@ -14,30 +14,56 @@ # See the License for the specific language governing permissions and # limitations under the License. 
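A short usage sketch of the `calculate_episode_data_index` helper added to `push_dataset_to_hub/utils.py` above, assuming the patched module is importable; the `episode_index` column mirrors the docstring example:

```python
import datasets

from lerobot.common.datasets.push_dataset_to_hub.utils import calculate_episode_data_index

# In-memory dataset with the episode_index column from the docstring example.
hf_dataset = datasets.Dataset.from_dict({"episode_index": [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]})

episode_data_index = calculate_episode_data_index(hf_dataset)
# {"from": tensor([0, 3, 7]), "to": tensor([3, 7, 12])}

# Frame range of episode 1, using the same "from"/"to" convention as the rest of the codebase.
from_idx = episode_data_index["from"][1].item()
to_idx = episode_data_index["to"][1].item()
episode_frames = [hf_dataset[i] for i in range(from_idx, to_idx)]  # 4 frames
```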
import json -import re -import warnings -from functools import cache +import logging +import textwrap +from itertools import accumulate from pathlib import Path -from typing import Dict +from pprint import pformat +from typing import Any import datasets +import jsonlines +import numpy as np +import pyarrow.compute as pc import torch -from datasets import load_dataset, load_from_disk -from huggingface_hub import DatasetCard, HfApi, hf_hub_download, snapshot_download +from datasets.table import embed_table_storage +from huggingface_hub import DatasetCard, DatasetCardData, HfApi from PIL import Image as PILImage -from safetensors.torch import load_file from torchvision import transforms +from lerobot.common.robot_devices.robots.utils import Robot + +DEFAULT_CHUNK_SIZE = 1000 # Max number of episodes per chunk + +INFO_PATH = "meta/info.json" +EPISODES_PATH = "meta/episodes.jsonl" +STATS_PATH = "meta/stats.json" +TASKS_PATH = "meta/tasks.jsonl" + +DEFAULT_VIDEO_PATH = "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4" +DEFAULT_PARQUET_PATH = "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet" +DEFAULT_IMAGE_PATH = "images/{image_key}/episode_{episode_index:06d}/frame_{frame_index:06d}.png" + DATASET_CARD_TEMPLATE = """ --- # Metadata will go there --- This dataset was created using [LeRobot](https://github.com/huggingface/lerobot). +## {} + """ +DEFAULT_FEATURES = { + "timestamp": {"dtype": "float32", "shape": (1,), "names": None}, + "frame_index": {"dtype": "int64", "shape": (1,), "names": None}, + "episode_index": {"dtype": "int64", "shape": (1,), "names": None}, + "index": {"dtype": "int64", "shape": (1,), "names": None}, + "task_index": {"dtype": "int64", "shape": (1,), "names": None}, +} + -def flatten_dict(d, parent_key="", sep="/"): +def flatten_dict(d: dict, parent_key: str = "", sep: str = "/") -> dict: """Flatten a nested dictionary structure by collapsing nested keys into one key with a separator. 
For example: @@ -56,7 +82,7 @@ def flatten_dict(d, parent_key="", sep="/"): return dict(items) -def unflatten_dict(d, sep="/"): +def unflatten_dict(d: dict, sep: str = "/") -> dict: outdict = {} for key, value in d.items(): parts = key.split(sep) @@ -69,6 +95,82 @@ def unflatten_dict(d, sep="/"): return outdict +def serialize_dict(stats: dict[str, torch.Tensor | np.ndarray | dict]) -> dict: + serialized_dict = {key: value.tolist() for key, value in flatten_dict(stats).items()} + return unflatten_dict(serialized_dict) + + +def write_parquet(dataset: datasets.Dataset, fpath: Path) -> None: + # Embed image bytes into the table before saving to parquet + format = dataset.format + dataset = dataset.with_format("arrow") + dataset = dataset.map(embed_table_storage, batched=False) + dataset = dataset.with_format(**format) + dataset.to_parquet(fpath) + + +def load_json(fpath: Path) -> Any: + with open(fpath) as f: + return json.load(f) + + +def write_json(data: dict, fpath: Path) -> None: + fpath.parent.mkdir(exist_ok=True, parents=True) + with open(fpath, "w") as f: + json.dump(data, f, indent=4, ensure_ascii=False) + + +def load_jsonlines(fpath: Path) -> list[Any]: + with jsonlines.open(fpath, "r") as reader: + return list(reader) + + +def write_jsonlines(data: dict, fpath: Path) -> None: + fpath.parent.mkdir(exist_ok=True, parents=True) + with jsonlines.open(fpath, "w") as writer: + writer.write_all(data) + + +def append_jsonlines(data: dict, fpath: Path) -> None: + fpath.parent.mkdir(exist_ok=True, parents=True) + with jsonlines.open(fpath, "a") as writer: + writer.write(data) + + +def load_info(local_dir: Path) -> dict: + info = load_json(local_dir / INFO_PATH) + for ft in info["features"].values(): + ft["shape"] = tuple(ft["shape"]) + return info + + +def load_stats(local_dir: Path) -> dict: + if not (local_dir / STATS_PATH).exists(): + return None + stats = load_json(local_dir / STATS_PATH) + stats = {key: torch.tensor(value) for key, value in flatten_dict(stats).items()} + return unflatten_dict(stats) + + +def load_tasks(local_dir: Path) -> dict: + tasks = load_jsonlines(local_dir / TASKS_PATH) + return {item["task_index"]: item["task"] for item in sorted(tasks, key=lambda x: x["task_index"])} + + +def load_episodes(local_dir: Path) -> dict: + return load_jsonlines(local_dir / EPISODES_PATH) + + +def load_image_as_numpy(fpath: str | Path, dtype="float32", channel_first: bool = True) -> np.ndarray: + img = PILImage.open(fpath).convert("RGB") + img_array = np.array(img, dtype=dtype) + if channel_first: # (H, W, C) -> (C, H, W) + img_array = np.transpose(img_array, (2, 0, 1)) + if "float" in dtype: + img_array /= 255.0 + return img_array + + def hf_transform_to_torch(items_dict: dict[torch.Tensor | None]): """Get a transform function that convert items from Hugging Face dataset (pyarrow) to torch tensors. 
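The new metadata helpers above (`write_json`, `append_jsonlines`, `load_tasks`, ...) operate on the json/jsonl files under `meta/`. A minimal sketch of the tasks-file round-trip, assuming the patched `lerobot.common.datasets.utils` is importable; the directory and task strings are illustrative:

```python
from pathlib import Path

from lerobot.common.datasets.utils import TASKS_PATH, append_jsonlines, load_tasks

local_dir = Path("/tmp/lerobot_example")  # hypothetical dataset root

# Append two entries to meta/tasks.jsonl (parent directories are created on demand).
task_0 = {"task_index": 0, "task": "Push the T-shaped block onto the T-shaped target."}
task_1 = {"task_index": 1, "task": "Insert the peg into the socket."}
append_jsonlines(task_0, local_dir / TASKS_PATH)
append_jsonlines(task_1, local_dir / TASKS_PATH)

tasks = load_tasks(local_dir)
# {0: "Push the T-shaped block onto the T-shaped target.", 1: "Insert the peg into the socket."}
```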
Importantly, images are converted from PIL, which corresponds to @@ -80,14 +182,6 @@ def hf_transform_to_torch(items_dict: dict[torch.Tensor | None]): if isinstance(first_item, PILImage.Image): to_tensor = transforms.ToTensor() items_dict[key] = [to_tensor(img) for img in items_dict[key]] - elif isinstance(first_item, str): - # TODO (michel-aractingi): add str2embedding via language tokenizer - # For now we leave this part up to the user to choose how to address - # language conditioned tasks - pass - elif isinstance(first_item, dict) and "path" in first_item and "timestamp" in first_item: - # video frame will be processed downstream - pass elif first_item is None: pass else: @@ -95,19 +189,67 @@ def hf_transform_to_torch(items_dict: dict[torch.Tensor | None]): return items_dict -@cache -def get_hf_dataset_safe_version(repo_id: str, version: str) -> str: +def _get_major_minor(version: str) -> tuple[int]: + split = version.strip("v").split(".") + return int(split[0]), int(split[1]) + + +class BackwardCompatibilityError(Exception): + def __init__(self, repo_id, version): + message = textwrap.dedent(f""" + BackwardCompatibilityError: The dataset you requested ({repo_id}) is in {version} format. + + We introduced a new format since v2.0 which is not backward compatible with v1.x. + Please, use our conversion script. Modify the following command with your own task description: + ``` + python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \\ + --repo-id {repo_id} \\ + --single-task "TASK DESCRIPTION." # <---- /!\\ Replace TASK DESCRIPTION /!\\ + ``` + + A few examples to replace TASK DESCRIPTION: "Pick up the blue cube and place it into the bin.", + "Insert the peg into the socket.", "Slide open the ziploc bag.", "Take the elevator to the 1st floor.", + "Open the top cabinet, store the pot inside it then close the cabinet.", "Push the T-shaped block onto the T-shaped target.", + "Grab the spray paint on the shelf and place it in the bin on top of the robot dog.", "Fold the sweatshirt.", ... + + If you encounter a problem, contact LeRobot maintainers on [Discord](https://discord.com/invite/s3KuuzsPFb) + or open an [issue on GitHub](https://github.com/huggingface/lerobot/issues/new/choose). + """) + super().__init__(message) + + +def check_version_compatibility( + repo_id: str, version_to_check: str, current_version: str, enforce_breaking_major: bool = True +) -> None: + current_major, _ = _get_major_minor(current_version) + major_to_check, _ = _get_major_minor(version_to_check) + if major_to_check < current_major and enforce_breaking_major: + raise BackwardCompatibilityError(repo_id, version_to_check) + elif float(version_to_check.strip("v")) < float(current_version.strip("v")): + logging.warning( + f"""The dataset you requested ({repo_id}) was created with a previous version ({version_to_check}) of the + codebase. The current codebase version is {current_version}. You should be fine since + backward compatibility is maintained. 
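A quick sketch of how the version-compatibility helpers above behave, assuming the patched module is importable; the repo id and version strings are illustrative:

```python
from lerobot.common.datasets.utils import BackwardCompatibilityError, check_version_compatibility

# Same major version, older minor version: only a warning is logged.
check_version_compatibility("lerobot/pusht", version_to_check="v2.0", current_version="v2.1")

# Older major version: raises and points to the v1 -> v2 conversion script.
try:
    check_version_compatibility("lerobot/pusht", version_to_check="v1.6", current_version="v2.0")
except BackwardCompatibilityError as err:
    print(err)
```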
If you encounter a problem, contact LeRobot maintainers on + Discord ('https://discord.com/invite/s3KuuzsPFb') or open an issue on github.""", + ) + + +def get_hub_safe_version(repo_id: str, version: str) -> str: api = HfApi() dataset_info = api.list_repo_refs(repo_id, repo_type="dataset") branches = [b.name for b in dataset_info.branches] if version not in branches: - warnings.warn( + num_version = float(version.strip("v")) + hub_num_versions = [float(v.strip("v")) for v in branches if v.startswith("v")] + if num_version >= 2.0 and all(v < 2.0 for v in hub_num_versions): + raise BackwardCompatibilityError(repo_id, version) + + logging.warning( f"""You are trying to load a dataset from {repo_id} created with a previous version of the codebase. The following versions are available: {branches}. The requested version ('{version}') is not found. You should be fine since backward compatibility is maintained. If you encounter a problem, contact LeRobot maintainers on Discord ('https://discord.com/invite/s3KuuzsPFb') or open an issue on github.""", - stacklevel=1, ) if "main" not in branches: raise ValueError(f"Version 'main' not found on {repo_id}") @@ -116,275 +258,184 @@ def get_hf_dataset_safe_version(repo_id: str, version: str) -> str: return version -def load_hf_dataset(repo_id: str, version: str, root: Path, split: str) -> datasets.Dataset: - """hf_dataset contains all the observations, states, actions, rewards, etc.""" - if root is not None: - hf_dataset = load_from_disk(str(Path(root) / repo_id / "train")) - # TODO(rcadene): clean this which enables getting a subset of dataset - if split != "train": - if "%" in split: - raise NotImplementedError(f"We dont support splitting based on percentage for now ({split}).") - match_from = re.search(r"train\[(\d+):\]", split) - match_to = re.search(r"train\[:(\d+)\]", split) - if match_from: - from_frame_index = int(match_from.group(1)) - hf_dataset = hf_dataset.select(range(from_frame_index, len(hf_dataset))) - elif match_to: - to_frame_index = int(match_to.group(1)) - hf_dataset = hf_dataset.select(range(to_frame_index)) - else: - raise ValueError( - f'`split` ({split}) should either be "train", "train[INT:]", or "train[:INT]"' - ) - else: - safe_version = get_hf_dataset_safe_version(repo_id, version) - hf_dataset = load_dataset(repo_id, revision=safe_version, split=split) - - hf_dataset.set_transform(hf_transform_to_torch) - return hf_dataset - - -def load_episode_data_index(repo_id, version, root) -> dict[str, torch.Tensor]: - """episode_data_index contains the range of indices for each episode - - Example: - ```python - from_id = episode_data_index["from"][episode_id].item() - to_id = episode_data_index["to"][episode_id].item() - episode_frames = [dataset[i] for i in range(from_id, to_id)] - ``` - """ - if root is not None: - path = Path(root) / repo_id / "meta_data" / "episode_data_index.safetensors" - else: - safe_version = get_hf_dataset_safe_version(repo_id, version) - path = hf_hub_download( - repo_id, "meta_data/episode_data_index.safetensors", repo_type="dataset", revision=safe_version - ) - - return load_file(path) - - -def load_stats(repo_id, version, root) -> dict[str, dict[str, torch.Tensor]]: - """stats contains the statistics per modality computed over the full dataset, such as max, min, mean, std - - Example: - ```python - normalized_action = (action - stats["action"]["mean"]) / stats["action"]["std"] - ``` - """ - if root is not None: - path = Path(root) / repo_id / "meta_data" / "stats.safetensors" - else: - safe_version = 
get_hf_dataset_safe_version(repo_id, version) - path = hf_hub_download( - repo_id, "meta_data/stats.safetensors", repo_type="dataset", revision=safe_version - ) +def get_hf_features_from_features(features: dict) -> datasets.Features: + hf_features = {} + for key, ft in features.items(): + if ft["dtype"] == "video": + continue + elif ft["dtype"] == "image": + hf_features[key] = datasets.Image() + elif ft["shape"] == (1,): + hf_features[key] = datasets.Value(dtype=ft["dtype"]) + else: + assert len(ft["shape"]) == 1 + hf_features[key] = datasets.Sequence( + length=ft["shape"][0], feature=datasets.Value(dtype=ft["dtype"]) + ) - stats = load_file(path) - return unflatten_dict(stats) + return datasets.Features(hf_features) -def load_info(repo_id, version, root) -> dict: - """info contains useful information regarding the dataset that are not stored elsewhere +def get_features_from_robot(robot: Robot, use_videos: bool = True) -> dict: + camera_ft = {} + if robot.cameras: + camera_ft = { + key: {"dtype": "video" if use_videos else "image", **ft} + for key, ft in robot.camera_features.items() + } + return {**robot.motor_features, **camera_ft, **DEFAULT_FEATURES} + + +def create_empty_dataset_info( + codebase_version: str, + fps: int, + robot_type: str, + features: dict, + use_videos: bool, +) -> dict: + return { + "codebase_version": codebase_version, + "robot_type": robot_type, + "total_episodes": 0, + "total_frames": 0, + "total_tasks": 0, + "total_videos": 0, + "total_chunks": 0, + "chunks_size": DEFAULT_CHUNK_SIZE, + "fps": fps, + "splits": {}, + "data_path": DEFAULT_PARQUET_PATH, + "video_path": DEFAULT_VIDEO_PATH if use_videos else None, + "features": features, + } - Example: - ```python - print("frame per second used to collect the video", info["fps"]) - ``` - """ - if root is not None: - path = Path(root) / repo_id / "meta_data" / "info.json" - else: - safe_version = get_hf_dataset_safe_version(repo_id, version) - path = hf_hub_download(repo_id, "meta_data/info.json", repo_type="dataset", revision=safe_version) - with open(path) as f: - info = json.load(f) - return info +def get_episode_data_index( + episode_dicts: list[dict], episodes: list[int] | None = None +) -> dict[str, torch.Tensor]: + episode_lengths = {ep_idx: ep_dict["length"] for ep_idx, ep_dict in enumerate(episode_dicts)} + if episodes is not None: + episode_lengths = {ep_idx: episode_lengths[ep_idx] for ep_idx in episodes} + cumulative_lenghts = list(accumulate(episode_lengths.values())) + return { + "from": torch.LongTensor([0] + cumulative_lenghts[:-1]), + "to": torch.LongTensor(cumulative_lenghts), + } -def load_videos(repo_id, version, root) -> Path: - if root is not None: - path = Path(root) / repo_id / "videos" - else: - # TODO(rcadene): we download the whole repo here. 
see if we can avoid this - safe_version = get_hf_dataset_safe_version(repo_id, version) - repo_dir = snapshot_download(repo_id, repo_type="dataset", revision=safe_version) - path = Path(repo_dir) / "videos" - return path +def calculate_total_episode( + hf_dataset: datasets.Dataset, raise_if_not_contiguous: bool = True +) -> dict[str, torch.Tensor]: + episode_indices = sorted(hf_dataset.unique("episode_index")) + total_episodes = len(episode_indices) + if raise_if_not_contiguous and episode_indices != list(range(total_episodes)): + raise ValueError("episode_index values are not sorted and contiguous.") + return total_episodes + + +def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> dict[str, torch.Tensor]: + episode_lengths = [] + table = hf_dataset.data.table + total_episodes = calculate_total_episode(hf_dataset) + for ep_idx in range(total_episodes): + ep_table = table.filter(pc.equal(table["episode_index"], ep_idx)) + episode_lengths.insert(ep_idx, len(ep_table)) + + cumulative_lenghts = list(accumulate(episode_lengths)) + return { + "from": torch.LongTensor([0] + cumulative_lenghts[:-1]), + "to": torch.LongTensor(cumulative_lenghts), + } -def load_previous_and_future_frames( - item: dict[str, torch.Tensor], +def check_timestamps_sync( hf_dataset: datasets.Dataset, episode_data_index: dict[str, torch.Tensor], - delta_timestamps: dict[str, list[float]], + fps: int, tolerance_s: float, -) -> dict[torch.Tensor]: + raise_value_error: bool = True, +) -> bool: """ - Given a current item in the dataset containing a timestamp (e.g. 0.6 seconds), and a list of time differences of - some modalities (e.g. delta_timestamps={"observation.image": [-0.8, -0.2, 0, 0.2]}), this function computes for each - given modality (e.g. "observation.image") a list of query timestamps (e.g. [-0.2, 0.4, 0.6, 0.8]) and loads the closest - frames in the dataset. - - Importantly, when no frame can be found around a query timestamp within a specified tolerance window, this function - raises an AssertionError. When a timestamp is queried before the first available timestamp of the episode or after - the last available timestamp, the violation of the tolerance doesnt raise an AssertionError, and the function - populates a boolean array indicating which frames are outside of the episode range. For instance, this boolean array - is useful during batched training to not supervise actions associated to timestamps coming after the end of the - episode, or to pad the observations in a specific way. Note that by default the observation frames before the start - of the episode are the same as the first frame of the episode. - - Parameters: - - item (dict): A dictionary containing all the data related to a frame. It is the result of `dataset[idx]`. Each key - corresponds to a different modality (e.g., "timestamp", "observation.image", "action"). - - hf_dataset (datasets.Dataset): A dictionary containing the full dataset. Each key corresponds to a different - modality (e.g., "timestamp", "observation.image", "action"). - - episode_data_index (dict): A dictionary containing two keys ("from" and "to") associated to dataset indices. - They indicate the start index and end index of each episode in the dataset. - - delta_timestamps (dict): A dictionary containing lists of delta timestamps for each possible modality to be - retrieved. These deltas are added to the item timestamp to form the query timestamps. 
- - tolerance_s (float, optional): The tolerance level (in seconds) used to determine if a data point is close enough to the query - timestamp by asserting `tol > difference`. It is suggested to set `tol` to a smaller value than the - smallest expected inter-frame period, but large enough to account for jitter. - - Returns: - - The same item with the queried frames for each modality specified in delta_timestamps, with an additional key for - each modality (e.g. "observation.image_is_pad"). - - Raises: - - AssertionError: If any of the frames unexpectedly violate the tolerance level. This could indicate synchronization - issues with timestamps during data collection. + This check is to make sure that each timestamps is separated to the next by 1/fps +/- tolerance to + account for possible numerical error. """ - # get indices of the frames associated to the episode, and their timestamps - ep_id = item["episode_index"].item() - ep_data_id_from = episode_data_index["from"][ep_id].item() - ep_data_id_to = episode_data_index["to"][ep_id].item() - ep_data_ids = torch.arange(ep_data_id_from, ep_data_id_to, 1) - - # load timestamps - ep_timestamps = hf_dataset.select_columns("timestamp")[ep_data_id_from:ep_data_id_to]["timestamp"] - ep_timestamps = torch.stack(ep_timestamps) - - # we make the assumption that the timestamps are sorted - ep_first_ts = ep_timestamps[0] - ep_last_ts = ep_timestamps[-1] - current_ts = item["timestamp"].item() - - for key in delta_timestamps: - # get timestamps used as query to retrieve data of previous/future frames - delta_ts = delta_timestamps[key] - query_ts = current_ts + torch.tensor(delta_ts) - - # compute distances between each query timestamp and all timestamps of all the frames belonging to the episode - dist = torch.cdist(query_ts[:, None], ep_timestamps[:, None], p=1) - min_, argmin_ = dist.min(1) - - # TODO(rcadene): synchronize timestamps + interpolation if needed - - is_pad = min_ > tolerance_s - - # check violated query timestamps are all outside the episode range - assert ((query_ts[is_pad] < ep_first_ts) | (ep_last_ts < query_ts[is_pad])).all(), ( - f"One or several timestamps unexpectedly violate the tolerance ({min_} > {tolerance_s=}) inside episode range." - "This might be due to synchronization issues with timestamps during data collection." - ) - - # get dataset indices corresponding to frames to be loaded - data_ids = ep_data_ids[argmin_] - - # load frames modality - item[key] = hf_dataset.select_columns(key)[data_ids][key] - - if isinstance(item[key][0], dict) and "path" in item[key][0]: - # video mode where frame are expressed as dict of path and timestamp - item[key] = item[key] - else: - item[key] = torch.stack(item[key]) - - item[f"{key}_is_pad"] = is_pad - - return item - - -def calculate_episode_data_index(hf_dataset: datasets.Dataset) -> Dict[str, torch.Tensor]: + timestamps = torch.stack(hf_dataset["timestamp"]) + diffs = torch.diff(timestamps) + within_tolerance = torch.abs(diffs - 1 / fps) <= tolerance_s + + # We mask differences between the timestamp at the end of an episode + # and the one at the start of the next episode since these are expected + # to be outside tolerance. 
+ mask = torch.ones(len(diffs), dtype=torch.bool) + ignored_diffs = episode_data_index["to"][:-1] - 1 + mask[ignored_diffs] = False + filtered_within_tolerance = within_tolerance[mask] + + if not torch.all(filtered_within_tolerance): + # Track original indices before masking + original_indices = torch.arange(len(diffs)) + filtered_indices = original_indices[mask] + outside_tolerance_filtered_indices = torch.nonzero(~filtered_within_tolerance) # .squeeze() + outside_tolerance_indices = filtered_indices[outside_tolerance_filtered_indices] + episode_indices = torch.stack(hf_dataset["episode_index"]) + + outside_tolerances = [] + for idx in outside_tolerance_indices: + entry = { + "timestamps": [timestamps[idx], timestamps[idx + 1]], + "diff": diffs[idx], + "episode_index": episode_indices[idx].item(), + } + outside_tolerances.append(entry) + + if raise_value_error: + raise ValueError( + f"""One or several timestamps unexpectedly violate the tolerance inside episode range. + This might be due to synchronization issues with timestamps during data collection. + \n{pformat(outside_tolerances)}""" + ) + return False + + return True + + +def check_delta_timestamps( + delta_timestamps: dict[str, list[float]], fps: int, tolerance_s: float, raise_value_error: bool = True +) -> bool: + """This will check if all the values in delta_timestamps are multiples of 1/fps +/- tolerance. + This is to ensure that these delta_timestamps added to any timestamp from a dataset will themselves be + actual timestamps from the dataset. """ - Calculate episode data index for the provided HuggingFace Dataset. Relies on episode_index column of hf_dataset. + outside_tolerance = {} + for key, delta_ts in delta_timestamps.items(): + within_tolerance = [abs(ts * fps - round(ts * fps)) / fps <= tolerance_s for ts in delta_ts] + if not all(within_tolerance): + outside_tolerance[key] = [ + ts for ts, is_within in zip(delta_ts, within_tolerance, strict=True) if not is_within + ] - Parameters: - - hf_dataset (datasets.Dataset): A HuggingFace dataset containing the episode index. + if len(outside_tolerance) > 0: + if raise_value_error: + raise ValueError( + f""" + The following delta_timestamps are found outside of tolerance range. + Please make sure they are multiples of 1/{fps} +/- tolerance and adjust + their values accordingly. + \n{pformat(outside_tolerance)} + """ + ) + return False - Returns: - - episode_data_index: A dictionary containing the data index for each episode. The dictionary has two keys: - - "from": A tensor containing the starting index of each episode. - - "to": A tensor containing the ending index of each episode. - """ - episode_data_index = {"from": [], "to": []} + return True - current_episode = None - """ - The episode_index is a list of integers, each representing the episode index of the corresponding example. - For instance, the following is a valid episode_index: - [0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2] - - Below, we iterate through the episode_index and populate the episode_data_index dictionary with the starting and - ending index of each episode. 
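A small sketch of the `check_delta_timestamps` validation above, assuming the patched module is importable; the fps, tolerance, and delta values are illustrative:

```python
from lerobot.common.datasets.utils import check_delta_timestamps

fps = 10
tolerance_s = 1e-4

# Every value is a multiple of 1/fps: the check passes and returns True.
check_delta_timestamps({"action": [-0.1, 0.0, 0.1, 0.2]}, fps, tolerance_s)

# 0.15 s is not a multiple of 1/10 s: a ValueError listing the offending values is raised.
check_delta_timestamps({"action": [0.15]}, fps, tolerance_s)
```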
For the episode_index above, the episode_data_index dictionary will look like this: - { - "from": [0, 3, 7], - "to": [3, 7, 12] - } - """ - if len(hf_dataset) == 0: - episode_data_index = { - "from": torch.tensor([]), - "to": torch.tensor([]), - } - return episode_data_index - for idx, episode_idx in enumerate(hf_dataset["episode_index"]): - if episode_idx != current_episode: - # We encountered a new episode, so we append its starting location to the "from" list - episode_data_index["from"].append(idx) - # If this is not the first episode, we append the ending location of the previous episode to the "to" list - if current_episode is not None: - episode_data_index["to"].append(idx) - # Let's keep track of the current episode index - current_episode = episode_idx - else: - # We are still in the same episode, so there is nothing for us to do here - pass - # We have reached the end of the dataset, so we append the ending location of the last episode to the "to" list - episode_data_index["to"].append(idx + 1) - for k in ["from", "to"]: - episode_data_index[k] = torch.tensor(episode_data_index[k]) +def get_delta_indices(delta_timestamps: dict[str, list[float]], fps: int) -> dict[str, list[int]]: + delta_indices = {} + for key, delta_ts in delta_timestamps.items(): + delta_indices[key] = (torch.tensor(delta_ts) * fps).long().tolist() - return episode_data_index - - -def reset_episode_index(hf_dataset: datasets.Dataset) -> datasets.Dataset: - """Reset the `episode_index` of the provided HuggingFace Dataset. - - `episode_data_index` (and related functionality such as `load_previous_and_future_frames`) requires the - `episode_index` to be sorted, continuous (1,1,1 and not 1,2,1) and start at 0. - - This brings the `episode_index` to the required format. - """ - if len(hf_dataset) == 0: - return hf_dataset - unique_episode_idxs = torch.stack(hf_dataset["episode_index"]).unique().tolist() - episode_idx_to_reset_idx_mapping = { - ep_id: reset_ep_id for reset_ep_id, ep_id in enumerate(unique_episode_idxs) - } - - def modify_ep_idx_func(example): - example["episode_index"] = episode_idx_to_reset_idx_mapping[example["episode_index"].item()] - return example - - hf_dataset = hf_dataset.map(modify_ep_idx_func) - - return hf_dataset + return delta_indices def cycle(iterable): @@ -400,7 +451,7 @@ def cycle(iterable): iterator = iter(iterable) -def create_branch(repo_id, *, branch: str, repo_type: str | None = None): +def create_branch(repo_id, *, branch: str, repo_type: str | None = None) -> None: """Create a branch on a existing Hugging Face repo. Delete the branch if it already exists before creating it. """ @@ -415,12 +466,35 @@ def create_branch(repo_id, *, branch: str, repo_type: str | None = None): api.create_branch(repo_id, repo_type=repo_type, branch=branch) -def create_lerobot_dataset_card(tags: list | None = None, text: str | None = None) -> DatasetCard: - card = DatasetCard(DATASET_CARD_TEMPLATE) - card.data.task_categories = ["robotics"] - card.data.tags = ["LeRobot"] - if tags is not None: - card.data.tags += tags - if text is not None: - card.text += text - return card +def create_lerobot_dataset_card( + tags: list | None = None, + dataset_info: dict | None = None, + **kwargs, +) -> DatasetCard: + """ + Keyword arguments will be used to replace values in ./lerobot/common/datasets/card_template.md. + Note: If specified, license must be one of https://huggingface.co/docs/hub/repositories-licenses. 
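For completeness, a tiny sketch of `get_delta_indices` above, which turns validated `delta_timestamps` into integer frame offsets; the fps value and feature keys are illustrative:

```python
from lerobot.common.datasets.utils import get_delta_indices

delta_timestamps = {"observation.image": [-0.1, 0.0], "action": [-0.1, 0.0, 0.1, 0.2]}
delta_indices = get_delta_indices(delta_timestamps, fps=10)
# {"observation.image": [-1, 0], "action": [-1, 0, 1, 2]}
```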
+ """ + card_tags = ["LeRobot"] + if tags: + card_tags += tags + if dataset_info: + dataset_structure = "[meta/info.json](meta/info.json):\n" + dataset_structure += f"```json\n{json.dumps(dataset_info, indent=4)}\n```\n" + kwargs = {**kwargs, "dataset_structure": dataset_structure} + card_data = DatasetCardData( + license=kwargs.get("license"), + tags=card_tags, + task_categories=["robotics"], + configs=[ + { + "config_name": "default", + "data_files": "data/*/*.parquet", + } + ], + ) + return DatasetCard.from_template( + card_data=card_data, + template_path="./lerobot/common/datasets/card_template.md", + **kwargs, + ) diff --git a/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py b/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py new file mode 100644 index 000000000..c8da2fe14 --- /dev/null +++ b/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py @@ -0,0 +1,882 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This script is for internal use to convert all datasets under the 'lerobot' hub user account to v2. + +Note: Since the original Aloha datasets don't use shadow motors, you need to comment those out in +lerobot/configs/robot/aloha.yaml before running this script. +""" + +import traceback +from pathlib import Path +from textwrap import dedent + +from lerobot import available_datasets +from lerobot.common.datasets.v2.convert_dataset_v1_to_v2 import convert_dataset, parse_robot_config + +LOCAL_DIR = Path("data/") + +ALOHA_CONFIG = Path("lerobot/configs/robot/aloha.yaml") +ALOHA_MOBILE_INFO = { + "robot_config": parse_robot_config(ALOHA_CONFIG), + "license": "mit", + "url": "https://mobile-aloha.github.io/", + "paper": "https://arxiv.org/abs/2401.02117", + "citation_bibtex": dedent(r""" + @inproceedings{fu2024mobile, + author = {Fu, Zipeng and Zhao, Tony Z. 
and Finn, Chelsea}, + title = {Mobile ALOHA: Learning Bimanual Mobile Manipulation with Low-Cost Whole-Body Teleoperation}, + booktitle = {arXiv}, + year = {2024}, + }""").lstrip(), +} +ALOHA_STATIC_INFO = { + "robot_config": parse_robot_config(ALOHA_CONFIG), + "license": "mit", + "url": "https://tonyzhaozh.github.io/aloha/", + "paper": "https://arxiv.org/abs/2304.13705", + "citation_bibtex": dedent(r""" + @article{Zhao2023LearningFB, + title={Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware}, + author={Tony Zhao and Vikash Kumar and Sergey Levine and Chelsea Finn}, + journal={RSS}, + year={2023}, + volume={abs/2304.13705}, + url={https://arxiv.org/abs/2304.13705} + }""").lstrip(), +} +PUSHT_INFO = { + "license": "mit", + "url": "https://diffusion-policy.cs.columbia.edu/", + "paper": "https://arxiv.org/abs/2303.04137v5", + "citation_bibtex": dedent(r""" + @article{chi2024diffusionpolicy, + author = {Cheng Chi and Zhenjia Xu and Siyuan Feng and Eric Cousineau and Yilun Du and Benjamin Burchfiel and Russ Tedrake and Shuran Song}, + title ={Diffusion Policy: Visuomotor Policy Learning via Action Diffusion}, + journal = {The International Journal of Robotics Research}, + year = {2024}, + }""").lstrip(), +} +XARM_INFO = { + "license": "mit", + "url": "https://www.nicklashansen.com/td-mpc/", + "paper": "https://arxiv.org/abs/2203.04955", + "citation_bibtex": dedent(r""" + @inproceedings{Hansen2022tdmpc, + title={Temporal Difference Learning for Model Predictive Control}, + author={Nicklas Hansen and Xiaolong Wang and Hao Su}, + booktitle={ICML}, + year={2022} + } + """), +} +UNITREEH_INFO = { + "license": "apache-2.0", +} + +DATASETS = { + "aloha_mobile_cabinet": { + "single_task": "Open the top cabinet, store the pot inside it then close the cabinet.", + **ALOHA_MOBILE_INFO, + }, + "aloha_mobile_chair": { + "single_task": "Push the chairs in front of the desk to place them against it.", + **ALOHA_MOBILE_INFO, + }, + "aloha_mobile_elevator": { + "single_task": "Take the elevator to the 1st floor.", + **ALOHA_MOBILE_INFO, + }, + "aloha_mobile_shrimp": { + "single_task": "Sauté the raw shrimp on both sides, then serve it in the bowl.", + **ALOHA_MOBILE_INFO, + }, + "aloha_mobile_wash_pan": { + "single_task": "Pick up the pan, rinse it in the sink and then place it in the drying rack.", + **ALOHA_MOBILE_INFO, + }, + "aloha_mobile_wipe_wine": { + "single_task": "Pick up the wet cloth on the faucet and use it to clean the spilled wine on the table and underneath the glass.", + **ALOHA_MOBILE_INFO, + }, + "aloha_static_battery": { + "single_task": "Place the battery into the slot of the remote controller.", + **ALOHA_STATIC_INFO, + }, + "aloha_static_candy": {"single_task": "Pick up the candy and unwrap it.", **ALOHA_STATIC_INFO}, + "aloha_static_coffee": { + "single_task": "Place the coffee capsule inside the capsule container, then place the cup onto the center of the cup tray, then push the 'Hot Water' and 'Travel Mug' buttons.", + **ALOHA_STATIC_INFO, + }, + "aloha_static_coffee_new": { + "single_task": "Place the coffee capsule inside the capsule container, then place the cup onto the center of the cup tray.", + **ALOHA_STATIC_INFO, + }, + "aloha_static_cups_open": { + "single_task": "Pick up the plastic cup and open its lid.", + **ALOHA_STATIC_INFO, + }, + "aloha_static_fork_pick_up": { + "single_task": "Pick up the fork and place it on the plate.", + **ALOHA_STATIC_INFO, + }, + "aloha_static_pingpong_test": { + "single_task": "Transfer one of the two balls in the right glass 
into the left glass, then transfer it back to the right glass.", + **ALOHA_STATIC_INFO, + }, + "aloha_static_pro_pencil": { + "single_task": "Pick up the pencil with the right arm, hand it over to the left arm then place it back onto the table.", + **ALOHA_STATIC_INFO, + }, + "aloha_static_screw_driver": { + "single_task": "Pick up the screwdriver with the right arm, hand it over to the left arm then place it into the cup.", + **ALOHA_STATIC_INFO, + }, + "aloha_static_tape": { + "single_task": "Cut a small piece of tape from the tape dispenser then place it on the cardboard box's edge.", + **ALOHA_STATIC_INFO, + }, + "aloha_static_thread_velcro": { + "single_task": "Pick up the velcro cable tie with the left arm, then insert the end of the velcro tie into the other end's loop with the right arm.", + **ALOHA_STATIC_INFO, + }, + "aloha_static_towel": { + "single_task": "Pick up a piece of paper towel and place it on the spilled liquid.", + **ALOHA_STATIC_INFO, + }, + "aloha_static_vinh_cup": { + "single_task": "Pick up the platic cup with the right arm, then pop its lid open with the left arm.", + **ALOHA_STATIC_INFO, + }, + "aloha_static_vinh_cup_left": { + "single_task": "Pick up the platic cup with the left arm, then pop its lid open with the right arm.", + **ALOHA_STATIC_INFO, + }, + "aloha_static_ziploc_slide": {"single_task": "Slide open the ziploc bag.", **ALOHA_STATIC_INFO}, + "aloha_sim_insertion_scripted": {"single_task": "Insert the peg into the socket.", **ALOHA_STATIC_INFO}, + "aloha_sim_insertion_scripted_image": { + "single_task": "Insert the peg into the socket.", + **ALOHA_STATIC_INFO, + }, + "aloha_sim_insertion_human": {"single_task": "Insert the peg into the socket.", **ALOHA_STATIC_INFO}, + "aloha_sim_insertion_human_image": { + "single_task": "Insert the peg into the socket.", + **ALOHA_STATIC_INFO, + }, + "aloha_sim_transfer_cube_scripted": { + "single_task": "Pick up the cube with the right arm and transfer it to the left arm.", + **ALOHA_STATIC_INFO, + }, + "aloha_sim_transfer_cube_scripted_image": { + "single_task": "Pick up the cube with the right arm and transfer it to the left arm.", + **ALOHA_STATIC_INFO, + }, + "aloha_sim_transfer_cube_human": { + "single_task": "Pick up the cube with the right arm and transfer it to the left arm.", + **ALOHA_STATIC_INFO, + }, + "aloha_sim_transfer_cube_human_image": { + "single_task": "Pick up the cube with the right arm and transfer it to the left arm.", + **ALOHA_STATIC_INFO, + }, + "pusht": {"single_task": "Push the T-shaped block onto the T-shaped target.", **PUSHT_INFO}, + "pusht_image": {"single_task": "Push the T-shaped block onto the T-shaped target.", **PUSHT_INFO}, + "unitreeh1_fold_clothes": {"single_task": "Fold the sweatshirt.", **UNITREEH_INFO}, + "unitreeh1_rearrange_objects": {"single_task": "Put the object into the bin.", **UNITREEH_INFO}, + "unitreeh1_two_robot_greeting": { + "single_task": "Greet the other robot with a high five.", + **UNITREEH_INFO, + }, + "unitreeh1_warehouse": { + "single_task": "Grab the spray paint on the shelf and place it in the bin on top of the robot dog.", + **UNITREEH_INFO, + }, + "xarm_lift_medium": {"single_task": "Pick up the cube and lift it.", **XARM_INFO}, + "xarm_lift_medium_image": {"single_task": "Pick up the cube and lift it.", **XARM_INFO}, + "xarm_lift_medium_replay": {"single_task": "Pick up the cube and lift it.", **XARM_INFO}, + "xarm_lift_medium_replay_image": {"single_task": "Pick up the cube and lift it.", **XARM_INFO}, + "xarm_push_medium": {"single_task": "Push 
the cube onto the target.", **XARM_INFO}, + "xarm_push_medium_image": {"single_task": "Push the cube onto the target.", **XARM_INFO}, + "xarm_push_medium_replay": {"single_task": "Push the cube onto the target.", **XARM_INFO}, + "xarm_push_medium_replay_image": {"single_task": "Push the cube onto the target.", **XARM_INFO}, + "umi_cup_in_the_wild": { + "single_task": "Put the cup on the plate.", + "license": "apache-2.0", + }, + "asu_table_top": { + "tasks_col": "language_instruction", + "license": "mit", + "paper": "https://link.springer.com/article/10.1007/s10514-023-10129-1", + "citation_bibtex": dedent(r""" + @inproceedings{zhou2023modularity, + title={Modularity through Attention: Efficient Training and Transfer of Language-Conditioned Policies for Robot Manipulation}, + author={Zhou, Yifan and Sonawani, Shubham and Phielipp, Mariano and Stepputtis, Simon and Amor, Heni}, + booktitle={Conference on Robot Learning}, + pages={1684--1695}, + year={2023}, + organization={PMLR} + } + @article{zhou2023learning, + title={Learning modular language-conditioned robot policies through attention}, + author={Zhou, Yifan and Sonawani, Shubham and Phielipp, Mariano and Ben Amor, Heni and Stepputtis, Simon}, + journal={Autonomous Robots}, + pages={1--21}, + year={2023}, + publisher={Springer} + }""").lstrip(), + }, + "austin_buds_dataset": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://ut-austin-rpl.github.io/BUDS-website/", + "paper": "https://arxiv.org/abs/2109.13841", + "citation_bibtex": dedent(r""" + @article{zhu2022bottom, + title={Bottom-Up Skill Discovery From Unsegmented Demonstrations for Long-Horizon Robot Manipulation}, + author={Zhu, Yifeng and Stone, Peter and Zhu, Yuke}, + journal={IEEE Robotics and Automation Letters}, + volume={7}, + number={2}, + pages={4126--4133}, + year={2022}, + publisher={IEEE} + }""").lstrip(), + }, + "austin_sailor_dataset": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://ut-austin-rpl.github.io/sailor/", + "paper": "https://arxiv.org/abs/2210.11435", + "citation_bibtex": dedent(r""" + @inproceedings{nasiriany2022sailor, + title={Learning and Retrieval from Prior Data for Skill-based Imitation Learning}, + author={Soroush Nasiriany and Tian Gao and Ajay Mandlekar and Yuke Zhu}, + booktitle={Conference on Robot Learning (CoRL)}, + year={2022} + }""").lstrip(), + }, + "austin_sirius_dataset": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://ut-austin-rpl.github.io/sirius/", + "paper": "https://arxiv.org/abs/2211.08416", + "citation_bibtex": dedent(r""" + @inproceedings{liu2022robot, + title = {Robot Learning on the Job: Human-in-the-Loop Autonomy and Learning During Deployment}, + author = {Huihan Liu and Soroush Nasiriany and Lance Zhang and Zhiyao Bao and Yuke Zhu}, + booktitle = {Robotics: Science and Systems (RSS)}, + year = {2023} + }""").lstrip(), + }, + "berkeley_autolab_ur5": { + "tasks_col": "language_instruction", + "license": "cc-by-4.0", + "url": "https://sites.google.com/view/berkeley-ur5/home", + "citation_bibtex": dedent(r""" + @misc{BerkeleyUR5Website, + title = {Berkeley {UR5} Demonstration Dataset}, + author = {Lawrence Yunliang Chen and Simeon Adebola and Ken Goldberg}, + howpublished = {https://sites.google.com/view/berkeley-ur5/home}, + }""").lstrip(), + }, + "berkeley_cable_routing": { + "tasks_col": "language_instruction", + "license": "cc-by-4.0", + "url": "https://sites.google.com/view/cablerouting/home", + "paper": 
"https://arxiv.org/abs/2307.08927", + "citation_bibtex": dedent(r""" + @article{luo2023multistage, + author = {Jianlan Luo and Charles Xu and Xinyang Geng and Gilbert Feng and Kuan Fang and Liam Tan and Stefan Schaal and Sergey Levine}, + title = {Multi-Stage Cable Routing through Hierarchical Imitation Learning}, + journal = {arXiv pre-print}, + year = {2023}, + url = {https://arxiv.org/abs/2307.08927}, + }""").lstrip(), + }, + "berkeley_fanuc_manipulation": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://sites.google.com/berkeley.edu/fanuc-manipulation", + "citation_bibtex": dedent(r""" + @article{fanuc_manipulation2023, + title={Fanuc Manipulation: A Dataset for Learning-based Manipulation with FANUC Mate 200iD Robot}, + author={Zhu, Xinghao and Tian, Ran and Xu, Chenfeng and Ding, Mingyu and Zhan, Wei and Tomizuka, Masayoshi}, + year={2023}, + }""").lstrip(), + }, + "berkeley_gnm_cory_hall": { + "tasks_col": "language_instruction", + "license": "mit", + "paper": "https://arxiv.org/abs/1709.10489", + "citation_bibtex": dedent(r""" + @inproceedings{kahn2018self, + title={Self-supervised deep reinforcement learning with generalized computation graphs for robot navigation}, + author={Kahn, Gregory and Villaflor, Adam and Ding, Bosen and Abbeel, Pieter and Levine, Sergey}, + booktitle={2018 IEEE international conference on robotics and automation (ICRA)}, + pages={5129--5136}, + year={2018}, + organization={IEEE} + }""").lstrip(), + }, + "berkeley_gnm_recon": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://sites.google.com/view/recon-robot", + "paper": "https://arxiv.org/abs/2104.05859", + "citation_bibtex": dedent(r""" + @inproceedings{shah2021rapid, + title={Rapid Exploration for Open-World Navigation with Latent Goal Models}, + author={Dhruv Shah and Benjamin Eysenbach and Nicholas Rhinehart and Sergey Levine}, + booktitle={5th Annual Conference on Robot Learning }, + year={2021}, + url={https://openreview.net/forum?id=d_SWJhyKfVw} + }""").lstrip(), + }, + "berkeley_gnm_sac_son": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://sites.google.com/view/SACSoN-review", + "paper": "https://arxiv.org/abs/2306.01874", + "citation_bibtex": dedent(r""" + @article{hirose2023sacson, + title={SACSoN: Scalable Autonomous Data Collection for Social Navigation}, + author={Hirose, Noriaki and Shah, Dhruv and Sridhar, Ajay and Levine, Sergey}, + journal={arXiv preprint arXiv:2306.01874}, + year={2023} + }""").lstrip(), + }, + "berkeley_mvp": { + "tasks_col": "language_instruction", + "license": "mit", + "paper": "https://arxiv.org/abs/2203.06173", + "citation_bibtex": dedent(r""" + @InProceedings{Radosavovic2022, + title = {Real-World Robot Learning with Masked Visual Pre-training}, + author = {Ilija Radosavovic and Tete Xiao and Stephen James and Pieter Abbeel and Jitendra Malik and Trevor Darrell}, + booktitle = {CoRL}, + year = {2022} + }""").lstrip(), + }, + "berkeley_rpt": { + "tasks_col": "language_instruction", + "license": "mit", + "paper": "https://arxiv.org/abs/2306.10007", + "citation_bibtex": dedent(r""" + @article{Radosavovic2023, + title={Robot Learning with Sensorimotor Pre-training}, + author={Ilija Radosavovic and Baifeng Shi and Letian Fu and Ken Goldberg and Trevor Darrell and Jitendra Malik}, + year={2023}, + journal={arXiv:2306.10007} + }""").lstrip(), + }, + "cmu_franka_exploration_dataset": { + "tasks_col": "language_instruction", + "license": "mit", + "url": 
"https://human-world-model.github.io/", + "paper": "https://arxiv.org/abs/2308.10901", + "citation_bibtex": dedent(r""" + @inproceedings{mendonca2023structured, + title={Structured World Models from Human Videos}, + author={Mendonca, Russell and Bahl, Shikhar and Pathak, Deepak}, + journal={RSS}, + year={2023} + }""").lstrip(), + }, + "cmu_play_fusion": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://play-fusion.github.io/", + "paper": "https://arxiv.org/abs/2312.04549", + "citation_bibtex": dedent(r""" + @inproceedings{chen2023playfusion, + title={PlayFusion: Skill Acquisition via Diffusion from Language-Annotated Play}, + author={Chen, Lili and Bahl, Shikhar and Pathak, Deepak}, + booktitle={CoRL}, + year={2023} + }""").lstrip(), + }, + "cmu_stretch": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://robo-affordances.github.io/", + "paper": "https://arxiv.org/abs/2304.08488", + "citation_bibtex": dedent(r""" + @inproceedings{bahl2023affordances, + title={Affordances from Human Videos as a Versatile Representation for Robotics}, + author={Bahl, Shikhar and Mendonca, Russell and Chen, Lili and Jain, Unnat and Pathak, Deepak}, + booktitle={CVPR}, + year={2023} + } + @article{mendonca2023structured, + title={Structured World Models from Human Videos}, + author={Mendonca, Russell and Bahl, Shikhar and Pathak, Deepak}, + journal={CoRL}, + year={2023} + }""").lstrip(), + }, + "columbia_cairlab_pusht_real": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://diffusion-policy.cs.columbia.edu/", + "paper": "https://arxiv.org/abs/2303.04137v5", + "citation_bibtex": dedent(r""" + @inproceedings{chi2023diffusionpolicy, + title={Diffusion Policy: Visuomotor Policy Learning via Action Diffusion}, + author={Chi, Cheng and Feng, Siyuan and Du, Yilun and Xu, Zhenjia and Cousineau, Eric and Burchfiel, Benjamin and Song, Shuran}, + booktitle={Proceedings of Robotics: Science and Systems (RSS)}, + year={2023} + }""").lstrip(), + }, + "conq_hose_manipulation": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://sites.google.com/view/conq-hose-manipulation-dataset/home", + "citation_bibtex": dedent(r""" + @misc{ConqHoseManipData, + author={Peter Mitrano and Dmitry Berenson}, + title={Conq Hose Manipulation Dataset, v1.15.0}, + year={2024}, + howpublished={https://sites.google.com/view/conq-hose-manipulation-dataset} + }""").lstrip(), + }, + "dlr_edan_shared_control": { + "tasks_col": "language_instruction", + "license": "mit", + "paper": "https://ieeexplore.ieee.org/document/9341156", + "citation_bibtex": dedent(r""" + @inproceedings{vogel_edan_2020, + title = {EDAN - an EMG-Controlled Daily Assistant to Help People with Physical Disabilities}, + language = {en}, + booktitle = {2020 {IEEE}/{RSJ} {International} {Conference} on {Intelligent} {Robots} and {Systems} ({IROS})}, + author = {Vogel, Jörn and Hagengruber, Annette and Iskandar, Maged and Quere, Gabriel and Leipscher, Ulrike and Bustamante, Samuel and Dietrich, Alexander and Hoeppner, Hannes and Leidner, Daniel and Albu-Schäffer, Alin}, + year = {2020} + } + @inproceedings{quere_shared_2020, + address = {Paris, France}, + title = {Shared {Control} {Templates} for {Assistive} {Robotics}}, + language = {en}, + booktitle = {2020 {IEEE} {International} {Conference} on {Robotics} and {Automation} ({ICRA})}, + author = {Quere, Gabriel and Hagengruber, Annette and Iskandar, Maged and Bustamante, Samuel and Leidner, Daniel and Stulp, Freek and 
Vogel, Joern}, + year = {2020}, + pages = {7}, + }""").lstrip(), + }, + "dlr_sara_grid_clamp": { + "tasks_col": "language_instruction", + "license": "mit", + "paper": "https://www.researchsquare.com/article/rs-3289569/v1", + "citation_bibtex": dedent(r""" + @article{padalkar2023guided, + title={A guided reinforcement learning approach using shared control templates for learning manipulation skills in the real world}, + author={Padalkar, Abhishek and Quere, Gabriel and Raffin, Antonin and Silv{\'e}rio, Jo{\~a}o and Stulp, Freek}, + journal={Research square preprint rs-3289569/v1}, + year={2023} + }""").lstrip(), + }, + "dlr_sara_pour": { + "tasks_col": "language_instruction", + "license": "mit", + "paper": "https://elib.dlr.de/193739/1/padalkar2023rlsct.pdf", + "citation_bibtex": dedent(r""" + @inproceedings{padalkar2023guiding, + title={Guiding Reinforcement Learning with Shared Control Templates}, + author={Padalkar, Abhishek and Quere, Gabriel and Steinmetz, Franz and Raffin, Antonin and Nieuwenhuisen, Matthias and Silv{\'e}rio, Jo{\~a}o and Stulp, Freek}, + booktitle={40th IEEE International Conference on Robotics and Automation, ICRA 2023}, + year={2023}, + organization={IEEE} + }""").lstrip(), + }, + "droid_100": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://droid-dataset.github.io/", + "paper": "https://arxiv.org/abs/2403.12945", + "citation_bibtex": dedent(r""" + @article{khazatsky2024droid, + title = {DROID: A Large-Scale In-The-Wild Robot Manipulation Dataset}, + author = {Alexander Khazatsky and Karl Pertsch and Suraj Nair and Ashwin Balakrishna and Sudeep Dasari and Siddharth Karamcheti and Soroush Nasiriany and Mohan Kumar Srirama and Lawrence Yunliang Chen and Kirsty Ellis and Peter David Fagan and Joey Hejna and Masha Itkina and Marion Lepert and Yecheng Jason Ma and Patrick Tree Miller and Jimmy Wu and Suneel Belkhale and Shivin Dass and Huy Ha and Arhan Jain and Abraham Lee and Youngwoon Lee and Marius Memmel and Sungjae Park and Ilija Radosavovic and Kaiyuan Wang and Albert Zhan and Kevin Black and Cheng Chi and Kyle Beltran Hatch and Shan Lin and Jingpei Lu and Jean Mercat and Abdul Rehman and Pannag R Sanketi and Archit Sharma and Cody Simpson and Quan Vuong and Homer Rich Walke and Blake Wulfe and Ted Xiao and Jonathan Heewon Yang and Arefeh Yavary and Tony Z. Zhao and Christopher Agia and Rohan Baijal and Mateo Guaman Castro and Daphne Chen and Qiuyu Chen and Trinity Chung and Jaimyn Drake and Ethan Paul Foster and Jensen Gao and David Antonio Herrera and Minho Heo and Kyle Hsu and Jiaheng Hu and Donovon Jackson and Charlotte Le and Yunshuang Li and Kevin Lin and Roy Lin and Zehan Ma and Abhiram Maddukuri and Suvir Mirchandani and Daniel Morton and Tony Nguyen and Abigail O'Neill and Rosario Scalise and Derick Seale and Victor Son and Stephen Tian and Emi Tran and Andrew E. Wang and Yilin Wu and Annie Xie and Jingyun Yang and Patrick Yin and Yunchu Zhang and Osbert Bastani and Glen Berseth and Jeannette Bohg and Ken Goldberg and Abhinav Gupta and Abhishek Gupta and Dinesh Jayaraman and Joseph J Lim and Jitendra Malik and Roberto Martín-Martín and Subramanian Ramamoorthy and Dorsa Sadigh and Shuran Song and Jiajun Wu and Michael C. 
Yip and Yuke Zhu and Thomas Kollar and Sergey Levine and Chelsea Finn}, + year = {2024}, + }""").lstrip(), + }, + "fmb": { + "tasks_col": "language_instruction", + "license": "cc-by-4.0", + "url": "https://functional-manipulation-benchmark.github.io/", + "paper": "https://arxiv.org/abs/2401.08553", + "citation_bibtex": dedent(r""" + @article{luo2024fmb, + title={FMB: a Functional Manipulation Benchmark for Generalizable Robotic Learning}, + author={Luo, Jianlan and Xu, Charles and Liu, Fangchen and Tan, Liam and Lin, Zipeng and Wu, Jeffrey and Abbeel, Pieter and Levine, Sergey}, + journal={arXiv preprint arXiv:2401.08553}, + year={2024} + }""").lstrip(), + }, + "iamlab_cmu_pickup_insert": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://openreview.net/forum?id=WuBv9-IGDUA", + "paper": "https://arxiv.org/abs/2401.14502", + "citation_bibtex": dedent(r""" + @inproceedings{saxena2023multiresolution, + title={Multi-Resolution Sensing for Real-Time Control with Vision-Language Models}, + author={Saumya Saxena and Mohit Sharma and Oliver Kroemer}, + booktitle={7th Annual Conference on Robot Learning}, + year={2023}, + url={https://openreview.net/forum?id=WuBv9-IGDUA} + }""").lstrip(), + }, + "imperialcollege_sawyer_wrist_cam": { + "tasks_col": "language_instruction", + "license": "mit", + }, + "jaco_play": { + "tasks_col": "language_instruction", + "license": "cc-by-4.0", + "url": "https://github.com/clvrai/clvr_jaco_play_dataset", + "citation_bibtex": dedent(r""" + @software{dass2023jacoplay, + author = {Dass, Shivin and Yapeter, Jullian and Zhang, Jesse and Zhang, Jiahui + and Pertsch, Karl and Nikolaidis, Stefanos and Lim, Joseph J.}, + title = {CLVR Jaco Play Dataset}, + url = {https://github.com/clvrai/clvr_jaco_play_dataset}, + version = {1.0.0}, + year = {2023} + }""").lstrip(), + }, + "kaist_nonprehensile": { + "tasks_col": "language_instruction", + "license": "cc-by-4.0", + "url": "https://github.com/JaeHyung-Kim/rlds_dataset_builder", + "citation_bibtex": dedent(r""" + @article{kimpre, + title={Pre-and post-contact policy decomposition for non-prehensile manipulation with zero-shot sim-to-real transfer}, + author={Kim, Minchan and Han, Junhyek and Kim, Jaehyung and Kim, Beomjoon}, + booktitle={2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, + year={2023}, + organization={IEEE} + }""").lstrip(), + }, + "nyu_door_opening_surprising_effectiveness": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://jyopari.github.io/VINN/", + "paper": "https://arxiv.org/abs/2112.01511", + "citation_bibtex": dedent(r""" + @misc{pari2021surprising, + title={The Surprising Effectiveness of Representation Learning for Visual Imitation}, + author={Jyothish Pari and Nur Muhammad Shafiullah and Sridhar Pandian Arunachalam and Lerrel Pinto}, + year={2021}, + eprint={2112.01511}, + archivePrefix={arXiv}, + primaryClass={cs.RO} + }""").lstrip(), + }, + "nyu_franka_play_dataset": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://play-to-policy.github.io/", + "paper": "https://arxiv.org/abs/2210.10047", + "citation_bibtex": dedent(r""" + @article{cui2022play, + title = {From Play to Policy: Conditional Behavior Generation from Uncurated Robot Data}, + author = {Cui, Zichen Jeff and Wang, Yibin and Shafiullah, Nur Muhammad Mahi and Pinto, Lerrel}, + journal = {arXiv preprint arXiv:2210.10047}, + year = {2022} + }""").lstrip(), + }, + "nyu_rot_dataset": { + "tasks_col": "language_instruction", + 
"license": "mit", + "url": "https://rot-robot.github.io/", + "paper": "https://arxiv.org/abs/2206.15469", + "citation_bibtex": dedent(r""" + @inproceedings{haldar2023watch, + title={Watch and match: Supercharging imitation with regularized optimal transport}, + author={Haldar, Siddhant and Mathur, Vaibhav and Yarats, Denis and Pinto, Lerrel}, + booktitle={Conference on Robot Learning}, + pages={32--43}, + year={2023}, + organization={PMLR} + }""").lstrip(), + }, + "roboturk": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://roboturk.stanford.edu/dataset_real.html", + "paper": "PAPER", + "citation_bibtex": dedent(r""" + @inproceedings{mandlekar2019scaling, + title={Scaling robot supervision to hundreds of hours with roboturk: Robotic manipulation dataset through human reasoning and dexterity}, + author={Mandlekar, Ajay and Booher, Jonathan and Spero, Max and Tung, Albert and Gupta, Anchit and Zhu, Yuke and Garg, Animesh and Savarese, Silvio and Fei-Fei, Li}, + booktitle={2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, + pages={1048--1055}, + year={2019}, + organization={IEEE} + }""").lstrip(), + }, + "stanford_hydra_dataset": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://sites.google.com/view/hydra-il-2023", + "paper": "https://arxiv.org/abs/2306.17237", + "citation_bibtex": dedent(r""" + @article{belkhale2023hydra, + title={HYDRA: Hybrid Robot Actions for Imitation Learning}, + author={Belkhale, Suneel and Cui, Yuchen and Sadigh, Dorsa}, + journal={arxiv}, + year={2023} + }""").lstrip(), + }, + "stanford_kuka_multimodal_dataset": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://sites.google.com/view/visionandtouch", + "paper": "https://arxiv.org/abs/1810.10191", + "citation_bibtex": dedent(r""" + @inproceedings{lee2019icra, + title={Making sense of vision and touch: Self-supervised learning of multimodal representations for contact-rich tasks}, + author={Lee, Michelle A and Zhu, Yuke and Srinivasan, Krishnan and Shah, Parth and Savarese, Silvio and Fei-Fei, Li and Garg, Animesh and Bohg, Jeannette}, + booktitle={2019 IEEE International Conference on Robotics and Automation (ICRA)}, + year={2019}, + url={https://arxiv.org/abs/1810.10191} + }""").lstrip(), + }, + "stanford_robocook": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://hshi74.github.io/robocook/", + "paper": "https://arxiv.org/abs/2306.14447", + "citation_bibtex": dedent(r""" + @article{shi2023robocook, + title={RoboCook: Long-Horizon Elasto-Plastic Object Manipulation with Diverse Tools}, + author={Shi, Haochen and Xu, Huazhe and Clarke, Samuel and Li, Yunzhu and Wu, Jiajun}, + journal={arXiv preprint arXiv:2306.14447}, + year={2023} + }""").lstrip(), + }, + "taco_play": { + "tasks_col": "language_instruction", + "license": "cc-by-4.0", + "url": "https://www.kaggle.com/datasets/oiermees/taco-robot", + "paper": "https://arxiv.org/abs/2209.08959, https://arxiv.org/abs/2210.01911", + "citation_bibtex": dedent(r""" + @inproceedings{rosete2022tacorl, + author = {Erick Rosete-Beas and Oier Mees and Gabriel Kalweit and Joschka Boedecker and Wolfram Burgard}, + title = {Latent Plans for Task Agnostic Offline Reinforcement Learning}, + journal = {Proceedings of the 6th Conference on Robot Learning (CoRL)}, + year = {2022} + } + @inproceedings{mees23hulc2, + title={Grounding Language with Visual Affordances over Unstructured Data}, + author={Oier Mees and Jessica Borja-Diaz and 
Wolfram Burgard}, + booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA)}, + year={2023}, + address = {London, UK} + }""").lstrip(), + }, + "tokyo_u_lsmo": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "URL", + "paper": "https://arxiv.org/abs/2107.05842", + "citation_bibtex": dedent(r""" + @Article{Osa22, + author = {Takayuki Osa}, + journal = {The International Journal of Robotics Research}, + title = {Motion Planning by Learning the Solution Manifold in Trajectory Optimization}, + year = {2022}, + number = {3}, + pages = {291--311}, + volume = {41}, + }""").lstrip(), + }, + "toto": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://toto-benchmark.org/", + "paper": "https://arxiv.org/abs/2306.00942", + "citation_bibtex": dedent(r""" + @inproceedings{zhou2023train, + author={Zhou, Gaoyue and Dean, Victoria and Srirama, Mohan Kumar and Rajeswaran, Aravind and Pari, Jyothish and Hatch, Kyle and Jain, Aryan and Yu, Tianhe and Abbeel, Pieter and Pinto, Lerrel and Finn, Chelsea and Gupta, Abhinav}, + booktitle={2023 IEEE International Conference on Robotics and Automation (ICRA)}, + title={Train Offline, Test Online: A Real Robot Learning Benchmark}, + year={2023}, + }""").lstrip(), + }, + "ucsd_kitchen_dataset": { + "tasks_col": "language_instruction", + "license": "mit", + "citation_bibtex": dedent(r""" + @ARTICLE{ucsd_kitchens, + author = {Ge Yan, Kris Wu, and Xiaolong Wang}, + title = {{ucsd kitchens Dataset}}, + year = {2023}, + month = {August} + }""").lstrip(), + }, + "ucsd_pick_and_place_dataset": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://owmcorl.github.io/#", + "paper": "https://arxiv.org/abs/2310.16029", + "citation_bibtex": dedent(r""" + @preprint{Feng2023Finetuning, + title={Finetuning Offline World Models in the Real World}, + author={Yunhai Feng, Nicklas Hansen, Ziyan Xiong, Chandramouli Rajagopalan, Xiaolong Wang}, + year={2023} + }""").lstrip(), + }, + "uiuc_d3field": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://robopil.github.io/d3fields/", + "paper": "https://arxiv.org/abs/2309.16118", + "citation_bibtex": dedent(r""" + @article{wang2023d3field, + title={D^3Field: Dynamic 3D Descriptor Fields for Generalizable Robotic Manipulation}, + author={Wang, Yixuan and Li, Zhuoran and Zhang, Mingtong and Driggs-Campbell, Katherine and Wu, Jiajun and Fei-Fei, Li and Li, Yunzhu}, + journal={arXiv preprint arXiv:}, + year={2023}, + }""").lstrip(), + }, + "usc_cloth_sim": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://uscresl.github.io/dmfd/", + "paper": "https://arxiv.org/abs/2207.10148", + "citation_bibtex": dedent(r""" + @article{salhotra2022dmfd, + author={Salhotra, Gautam and Liu, I-Chun Arthur and Dominguez-Kuhne, Marcus and Sukhatme, Gaurav S.}, + journal={IEEE Robotics and Automation Letters}, + title={Learning Deformable Object Manipulation From Expert Demonstrations}, + year={2022}, + volume={7}, + number={4}, + pages={8775-8782}, + doi={10.1109/LRA.2022.3187843} + }""").lstrip(), + }, + "utaustin_mutex": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://ut-austin-rpl.github.io/MUTEX/", + "paper": "https://arxiv.org/abs/2309.14320", + "citation_bibtex": dedent(r""" + @inproceedings{shah2023mutex, + title={{MUTEX}: Learning Unified Policies from Multimodal Task Specifications}, + author={Rutav Shah and Roberto Mart{\'\i}n-Mart{\'\i}n and Yuke Zhu}, + 
booktitle={7th Annual Conference on Robot Learning}, + year={2023}, + url={https://openreview.net/forum?id=PwqiqaaEzJ} + }""").lstrip(), + }, + "utokyo_pr2_opening_fridge": { + "tasks_col": "language_instruction", + "license": "mit", + "citation_bibtex": dedent(r""" + @misc{oh2023pr2utokyodatasets, + author={Jihoon Oh and Naoaki Kanazawa and Kento Kawaharazuka}, + title={X-Embodiment U-Tokyo PR2 Datasets}, + year={2023}, + url={https://github.com/ojh6404/rlds_dataset_builder}, + }""").lstrip(), + }, + "utokyo_pr2_tabletop_manipulation": { + "tasks_col": "language_instruction", + "license": "mit", + "citation_bibtex": dedent(r""" + @misc{oh2023pr2utokyodatasets, + author={Jihoon Oh and Naoaki Kanazawa and Kento Kawaharazuka}, + title={X-Embodiment U-Tokyo PR2 Datasets}, + year={2023}, + url={https://github.com/ojh6404/rlds_dataset_builder}, + }""").lstrip(), + }, + "utokyo_saytap": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://saytap.github.io/", + "paper": "https://arxiv.org/abs/2306.07580", + "citation_bibtex": dedent(r""" + @article{saytap2023, + author = {Yujin Tang and Wenhao Yu and Jie Tan and Heiga Zen and Aleksandra Faust and + Tatsuya Harada}, + title = {SayTap: Language to Quadrupedal Locomotion}, + eprint = {arXiv:2306.07580}, + url = {https://saytap.github.io}, + note = {https://saytap.github.io}, + year = {2023} + }""").lstrip(), + }, + "utokyo_xarm_bimanual": { + "tasks_col": "language_instruction", + "license": "cc-by-4.0", + "citation_bibtex": dedent(r""" + @misc{matsushima2023weblab, + title={Weblab xArm Dataset}, + author={Tatsuya Matsushima and Hiroki Furuta and Yusuke Iwasawa and Yutaka Matsuo}, + year={2023}, + }""").lstrip(), + }, + "utokyo_xarm_pick_and_place": { + "tasks_col": "language_instruction", + "license": "cc-by-4.0", + "citation_bibtex": dedent(r""" + @misc{matsushima2023weblab, + title={Weblab xArm Dataset}, + author={Tatsuya Matsushima and Hiroki Furuta and Yusuke Iwasawa and Yutaka Matsuo}, + year={2023}, + }""").lstrip(), + }, + "viola": { + "tasks_col": "language_instruction", + "license": "mit", + "url": "https://ut-austin-rpl.github.io/VIOLA/", + "paper": "https://arxiv.org/abs/2210.11339", + "citation_bibtex": dedent(r""" + @article{zhu2022viola, + title={VIOLA: Imitation Learning for Vision-Based Manipulation with Object Proposal Priors}, + author={Zhu, Yifeng and Joshi, Abhishek and Stone, Peter and Zhu, Yuke}, + journal={6th Annual Conference on Robot Learning (CoRL)}, + year={2022} + }""").lstrip(), + }, +} + + +def batch_convert(): + status = {} + logfile = LOCAL_DIR / "conversion_log.txt" + assert set(DATASETS) == {id_.split("/")[1] for id_ in available_datasets} + for num, (name, kwargs) in enumerate(DATASETS.items()): + repo_id = f"lerobot/{name}" + print(f"\nConverting {repo_id} ({num}/{len(DATASETS)})") + print("---------------------------------------------------------") + try: + convert_dataset(repo_id, LOCAL_DIR, **kwargs) + status = f"{repo_id}: success." 
+            with open(logfile, "a") as file:
+                file.write(status + "\n")
+        except Exception:
+            status = f"{repo_id}: failed\n    {traceback.format_exc()}"
+            with open(logfile, "a") as file:
+                file.write(status + "\n")
+            continue
+
+
+if __name__ == "__main__":
+    batch_convert()
diff --git a/lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py b/lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py
new file mode 100644
index 000000000..bf135043b
--- /dev/null
+++ b/lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py
@@ -0,0 +1,665 @@
+#!/usr/bin/env python
+
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 1.6 to
+2.0. You will be required to provide the 'tasks', which is a short but accurate description in plain English
+for each of the tasks performed in the dataset. This will allow you to easily train models with task-conditioning.
+
+We support 3 different scenarios for these tasks (see instructions below):
+    1. Single task dataset: all episodes of your dataset have the same single task.
+    2. Single task episodes: the episodes of your dataset each contain a single task but they can differ from
+      one episode to the next.
+    3. Multi task episodes: episodes of your dataset may each contain several different tasks.
+
+
+You can also provide a robot config .yaml file (optional) to this script via the option
+'--robot-config' so that it writes information about the robot (robot type, motor names) this dataset was
+recorded with. For now, only Aloha/Koch type robots are supported with this option.
+
+
+# 1. Single task dataset
+If your dataset contains a single task, you can simply provide it directly via the CLI with the
+'--single-task' option.
+
+Examples:
+
+```bash
+python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
+    --repo-id lerobot/aloha_sim_insertion_human_image \
+    --single-task "Insert the peg into the socket." \
+    --robot-config lerobot/configs/robot/aloha.yaml \
+    --local-dir data
+```
+
+```bash
+python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
+    --repo-id aliberts/koch_tutorial \
+    --single-task "Pick the Lego block and drop it in the box on the right." \
+    --robot-config lerobot/configs/robot/koch.yaml \
+    --local-dir data
+```
+
+
+# 2. Single task episodes
+If your dataset is a multi-task dataset, you have two options to provide the tasks to this script:
+
+- If your dataset already contains a language instruction column in its parquet file, you can simply provide
+  this column's name with the '--tasks-col' arg.
+
+    Example:
+
+    ```bash
+    python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
+        --repo-id lerobot/stanford_kuka_multimodal_dataset \
+        --tasks-col "language_instruction" \
+        --local-dir data
+    ```
+
+- If your dataset doesn't contain a language instruction, you should provide the path to a .json file with the
+  '--tasks-path' arg. This file should have the following structure where keys correspond to each
+  episode_index in the dataset, and values are the language instruction for that episode.
+
+    Example:
+
+    ```json
+    {
+        "0": "Do something",
+        "1": "Do something else",
+        "2": "Do something",
+        "3": "Go there",
+        ...
+    }
+    ```
+
+# 3. Multi task episodes
+If you have multiple tasks per episode, your dataset should contain a language instruction column in its
+parquet file, and you must provide this column's name with the '--tasks-col' arg.
+
+Example:
+
+```bash
+python lerobot/common/datasets/v2/convert_dataset_v1_to_v2.py \
+    --repo-id lerobot/stanford_kuka_multimodal_dataset \
+    --tasks-col "language_instruction" \
+    --local-dir data
+```
+"""
+
+import argparse
+import contextlib
+import filecmp
+import json
+import logging
+import math
+import shutil
+import subprocess
+import tempfile
+from pathlib import Path
+
+import datasets
+import pyarrow.compute as pc
+import pyarrow.parquet as pq
+import torch
+from datasets import Dataset
+from huggingface_hub import HfApi
+from huggingface_hub.errors import EntryNotFoundError, HfHubHTTPError
+from safetensors.torch import load_file
+
+from lerobot.common.datasets.utils import (
+    DEFAULT_CHUNK_SIZE,
+    DEFAULT_PARQUET_PATH,
+    DEFAULT_VIDEO_PATH,
+    EPISODES_PATH,
+    INFO_PATH,
+    STATS_PATH,
+    TASKS_PATH,
+    create_branch,
+    create_lerobot_dataset_card,
+    flatten_dict,
+    get_hub_safe_version,
+    load_json,
+    unflatten_dict,
+    write_json,
+    write_jsonlines,
+)
+from lerobot.common.datasets.video_utils import (
+    VideoFrame,  # noqa: F401
+    get_image_pixel_channels,
+    get_video_info,
+)
+from lerobot.common.utils.utils import init_hydra_config
+
+V16 = "v1.6"
+V20 = "v2.0"
+
+GITATTRIBUTES_REF = "aliberts/gitattributes_reference"
+V1_VIDEO_FILE = "{video_key}_episode_{episode_index:06d}.mp4"
+V1_INFO_PATH = "meta_data/info.json"
+V1_STATS_PATH = "meta_data/stats.safetensors"
+
+
+def parse_robot_config(config_path: Path, config_overrides: list[str] | None = None) -> dict:
+    robot_cfg = init_hydra_config(config_path, config_overrides)
+    if robot_cfg["robot_type"] in ["aloha", "koch"]:
+        state_names = [
+            f"{arm}_{motor}" if len(robot_cfg["follower_arms"]) > 1 else motor
+            for arm in robot_cfg["follower_arms"]
+            for motor in robot_cfg["follower_arms"][arm]["motors"]
+        ]
+        action_names = [
+            # f"{arm}_{motor}" for arm in ["left", "right"] for motor in robot_cfg["leader_arms"][arm]["motors"]
+            f"{arm}_{motor}" if len(robot_cfg["leader_arms"]) > 1 else motor
+            for arm in robot_cfg["leader_arms"]
+            for motor in robot_cfg["leader_arms"][arm]["motors"]
+        ]
+    # elif robot_cfg["robot_type"] == "stretch3": TODO
+    else:
+        raise NotImplementedError(
+            "Please provide robot_config={'robot_type': ..., 'names': ...} directly to convert_dataset()."
+ ) + + return { + "robot_type": robot_cfg["robot_type"], + "names": { + "observation.state": state_names, + "observation.effort": state_names, + "action": action_names, + }, + } + + +def convert_stats_to_json(v1_dir: Path, v2_dir: Path) -> None: + safetensor_path = v1_dir / V1_STATS_PATH + stats = load_file(safetensor_path) + serialized_stats = {key: value.tolist() for key, value in stats.items()} + serialized_stats = unflatten_dict(serialized_stats) + + json_path = v2_dir / STATS_PATH + json_path.parent.mkdir(exist_ok=True, parents=True) + with open(json_path, "w") as f: + json.dump(serialized_stats, f, indent=4) + + # Sanity check + with open(json_path) as f: + stats_json = json.load(f) + + stats_json = flatten_dict(stats_json) + stats_json = {key: torch.tensor(value) for key, value in stats_json.items()} + for key in stats: + torch.testing.assert_close(stats_json[key], stats[key]) + + +def get_features_from_hf_dataset(dataset: Dataset, robot_config: dict | None = None) -> dict[str, list]: + features = {} + for key, ft in dataset.features.items(): + if isinstance(ft, datasets.Value): + dtype = ft.dtype + shape = (1,) + names = None + if isinstance(ft, datasets.Sequence): + assert isinstance(ft.feature, datasets.Value) + dtype = ft.feature.dtype + shape = (ft.length,) + motor_names = ( + robot_config["names"][key] if robot_config else [f"motor_{i}" for i in range(ft.length)] + ) + assert len(motor_names) == shape[0] + names = {"motors": motor_names} + elif isinstance(ft, datasets.Image): + dtype = "image" + image = dataset[0][key] # Assuming first row + channels = get_image_pixel_channels(image) + shape = (image.height, image.width, channels) + names = ["height", "width", "channel"] + elif ft._type == "VideoFrame": + dtype = "video" + shape = None # Add shape later + names = ["height", "width", "channel"] + + features[key] = { + "dtype": dtype, + "shape": shape, + "names": names, + } + + return features + + +def add_task_index_by_episodes(dataset: Dataset, tasks_by_episodes: dict) -> tuple[Dataset, list[str]]: + df = dataset.to_pandas() + tasks = list(set(tasks_by_episodes.values())) + tasks_to_task_index = {task: task_idx for task_idx, task in enumerate(tasks)} + episodes_to_task_index = {ep_idx: tasks_to_task_index[task] for ep_idx, task in tasks_by_episodes.items()} + df["task_index"] = df["episode_index"].map(episodes_to_task_index).astype(int) + + features = dataset.features + features["task_index"] = datasets.Value(dtype="int64") + dataset = Dataset.from_pandas(df, features=features, split="train") + return dataset, tasks + + +def add_task_index_from_tasks_col( + dataset: Dataset, tasks_col: str +) -> tuple[Dataset, dict[str, list[str]], list[str]]: + df = dataset.to_pandas() + + # HACK: This is to clean some of the instructions in our version of Open X datasets + prefix_to_clean = "tf.Tensor(b'" + suffix_to_clean = "', shape=(), dtype=string)" + df[tasks_col] = df[tasks_col].str.removeprefix(prefix_to_clean).str.removesuffix(suffix_to_clean) + + # Create task_index col + tasks_by_episode = df.groupby("episode_index")[tasks_col].unique().apply(lambda x: x.tolist()).to_dict() + tasks = df[tasks_col].unique().tolist() + tasks_to_task_index = {task: idx for idx, task in enumerate(tasks)} + df["task_index"] = df[tasks_col].map(tasks_to_task_index).astype(int) + + # Build the dataset back from df + features = dataset.features + features["task_index"] = datasets.Value(dtype="int64") + dataset = Dataset.from_pandas(df, features=features, split="train") + dataset = 
dataset.remove_columns(tasks_col) + + return dataset, tasks, tasks_by_episode + + +def split_parquet_by_episodes( + dataset: Dataset, + total_episodes: int, + total_chunks: int, + output_dir: Path, +) -> list: + table = dataset.data.table + episode_lengths = [] + for ep_chunk in range(total_chunks): + ep_chunk_start = DEFAULT_CHUNK_SIZE * ep_chunk + ep_chunk_end = min(DEFAULT_CHUNK_SIZE * (ep_chunk + 1), total_episodes) + chunk_dir = "/".join(DEFAULT_PARQUET_PATH.split("/")[:-1]).format(episode_chunk=ep_chunk) + (output_dir / chunk_dir).mkdir(parents=True, exist_ok=True) + for ep_idx in range(ep_chunk_start, ep_chunk_end): + ep_table = table.filter(pc.equal(table["episode_index"], ep_idx)) + episode_lengths.insert(ep_idx, len(ep_table)) + output_file = output_dir / DEFAULT_PARQUET_PATH.format( + episode_chunk=ep_chunk, episode_index=ep_idx + ) + pq.write_table(ep_table, output_file) + + return episode_lengths + + +def move_videos( + repo_id: str, + video_keys: list[str], + total_episodes: int, + total_chunks: int, + work_dir: Path, + clean_gittatributes: Path, + branch: str = "main", +) -> None: + """ + HACK: Since HfApi() doesn't provide a way to move files directly in a repo, this function will run git + commands to fetch git lfs video files references to move them into subdirectories without having to + actually download them. + """ + _lfs_clone(repo_id, work_dir, branch) + + videos_moved = False + video_files = [str(f.relative_to(work_dir)) for f in work_dir.glob("videos*/*.mp4")] + if len(video_files) == 0: + video_files = [str(f.relative_to(work_dir)) for f in work_dir.glob("videos*/*/*/*.mp4")] + videos_moved = True # Videos have already been moved + + assert len(video_files) == total_episodes * len(video_keys) + + lfs_untracked_videos = _get_lfs_untracked_videos(work_dir, video_files) + + current_gittatributes = work_dir / ".gitattributes" + if not filecmp.cmp(current_gittatributes, clean_gittatributes, shallow=False): + fix_gitattributes(work_dir, current_gittatributes, clean_gittatributes) + + if lfs_untracked_videos: + fix_lfs_video_files_tracking(work_dir, video_files) + + if videos_moved: + return + + video_dirs = sorted(work_dir.glob("videos*/")) + for ep_chunk in range(total_chunks): + ep_chunk_start = DEFAULT_CHUNK_SIZE * ep_chunk + ep_chunk_end = min(DEFAULT_CHUNK_SIZE * (ep_chunk + 1), total_episodes) + for vid_key in video_keys: + chunk_dir = "/".join(DEFAULT_VIDEO_PATH.split("/")[:-1]).format( + episode_chunk=ep_chunk, video_key=vid_key + ) + (work_dir / chunk_dir).mkdir(parents=True, exist_ok=True) + + for ep_idx in range(ep_chunk_start, ep_chunk_end): + target_path = DEFAULT_VIDEO_PATH.format( + episode_chunk=ep_chunk, video_key=vid_key, episode_index=ep_idx + ) + video_file = V1_VIDEO_FILE.format(video_key=vid_key, episode_index=ep_idx) + if len(video_dirs) == 1: + video_path = video_dirs[0] / video_file + else: + for dir in video_dirs: + if (dir / video_file).is_file(): + video_path = dir / video_file + break + + video_path.rename(work_dir / target_path) + + commit_message = "Move video files into chunk subdirectories" + subprocess.run(["git", "add", "."], cwd=work_dir, check=True) + subprocess.run(["git", "commit", "-m", commit_message], cwd=work_dir, check=True) + subprocess.run(["git", "push"], cwd=work_dir, check=True) + + +def fix_lfs_video_files_tracking(work_dir: Path, lfs_untracked_videos: list[str]) -> None: + """ + HACK: This function fixes the tracking by git lfs which was not properly set on some repos. 
In that case, + there's no other option than to download the actual files and reupload them with lfs tracking. + """ + for i in range(0, len(lfs_untracked_videos), 100): + files = lfs_untracked_videos[i : i + 100] + try: + subprocess.run(["git", "rm", "--cached", *files], cwd=work_dir, capture_output=True, check=True) + except subprocess.CalledProcessError as e: + print("git rm --cached ERROR:") + print(e.stderr) + subprocess.run(["git", "add", *files], cwd=work_dir, check=True) + + commit_message = "Track video files with git lfs" + subprocess.run(["git", "commit", "-m", commit_message], cwd=work_dir, check=True) + subprocess.run(["git", "push"], cwd=work_dir, check=True) + + +def fix_gitattributes(work_dir: Path, current_gittatributes: Path, clean_gittatributes: Path) -> None: + shutil.copyfile(clean_gittatributes, current_gittatributes) + subprocess.run(["git", "add", ".gitattributes"], cwd=work_dir, check=True) + subprocess.run(["git", "commit", "-m", "Fix .gitattributes"], cwd=work_dir, check=True) + subprocess.run(["git", "push"], cwd=work_dir, check=True) + + +def _lfs_clone(repo_id: str, work_dir: Path, branch: str) -> None: + subprocess.run(["git", "lfs", "install"], cwd=work_dir, check=True) + repo_url = f"https://huggingface.co/datasets/{repo_id}" + env = {"GIT_LFS_SKIP_SMUDGE": "1"} # Prevent downloading LFS files + subprocess.run( + ["git", "clone", "--branch", branch, "--single-branch", "--depth", "1", repo_url, str(work_dir)], + check=True, + env=env, + ) + + +def _get_lfs_untracked_videos(work_dir: Path, video_files: list[str]) -> list[str]: + lfs_tracked_files = subprocess.run( + ["git", "lfs", "ls-files", "-n"], cwd=work_dir, capture_output=True, text=True, check=True + ) + lfs_tracked_files = set(lfs_tracked_files.stdout.splitlines()) + return [f for f in video_files if f not in lfs_tracked_files] + + +def get_videos_info(repo_id: str, local_dir: Path, video_keys: list[str], branch: str) -> dict: + # Assumes first episode + video_files = [ + DEFAULT_VIDEO_PATH.format(episode_chunk=0, video_key=vid_key, episode_index=0) + for vid_key in video_keys + ] + hub_api = HfApi() + hub_api.snapshot_download( + repo_id=repo_id, repo_type="dataset", local_dir=local_dir, revision=branch, allow_patterns=video_files + ) + videos_info_dict = {} + for vid_key, vid_path in zip(video_keys, video_files, strict=True): + videos_info_dict[vid_key] = get_video_info(local_dir / vid_path) + + return videos_info_dict + + +def convert_dataset( + repo_id: str, + local_dir: Path, + single_task: str | None = None, + tasks_path: Path | None = None, + tasks_col: Path | None = None, + robot_config: dict | None = None, + test_branch: str | None = None, + **card_kwargs, +): + v1 = get_hub_safe_version(repo_id, V16) + v1x_dir = local_dir / V16 / repo_id + v20_dir = local_dir / V20 / repo_id + v1x_dir.mkdir(parents=True, exist_ok=True) + v20_dir.mkdir(parents=True, exist_ok=True) + + hub_api = HfApi() + hub_api.snapshot_download( + repo_id=repo_id, repo_type="dataset", revision=v1, local_dir=v1x_dir, ignore_patterns="videos*/" + ) + branch = "main" + if test_branch: + branch = test_branch + create_branch(repo_id=repo_id, branch=test_branch, repo_type="dataset") + + metadata_v1 = load_json(v1x_dir / V1_INFO_PATH) + dataset = datasets.load_dataset("parquet", data_dir=v1x_dir / "data", split="train") + features = get_features_from_hf_dataset(dataset, robot_config) + video_keys = [key for key, ft in features.items() if ft["dtype"] == "video"] + + if single_task and "language_instruction" in dataset.column_names: 
+ logging.warning( + "'single_task' provided but 'language_instruction' tasks_col found. Using 'language_instruction'.", + ) + single_task = None + tasks_col = "language_instruction" + + # Episodes & chunks + episode_indices = sorted(dataset.unique("episode_index")) + total_episodes = len(episode_indices) + assert episode_indices == list(range(total_episodes)) + total_videos = total_episodes * len(video_keys) + total_chunks = total_episodes // DEFAULT_CHUNK_SIZE + if total_episodes % DEFAULT_CHUNK_SIZE != 0: + total_chunks += 1 + + # Tasks + if single_task: + tasks_by_episodes = {ep_idx: single_task for ep_idx in episode_indices} + dataset, tasks = add_task_index_by_episodes(dataset, tasks_by_episodes) + tasks_by_episodes = {ep_idx: [task] for ep_idx, task in tasks_by_episodes.items()} + elif tasks_path: + tasks_by_episodes = load_json(tasks_path) + tasks_by_episodes = {int(ep_idx): task for ep_idx, task in tasks_by_episodes.items()} + dataset, tasks = add_task_index_by_episodes(dataset, tasks_by_episodes) + tasks_by_episodes = {ep_idx: [task] for ep_idx, task in tasks_by_episodes.items()} + elif tasks_col: + dataset, tasks, tasks_by_episodes = add_task_index_from_tasks_col(dataset, tasks_col) + else: + raise ValueError + + assert set(tasks) == {task for ep_tasks in tasks_by_episodes.values() for task in ep_tasks} + tasks = [{"task_index": task_idx, "task": task} for task_idx, task in enumerate(tasks)] + write_jsonlines(tasks, v20_dir / TASKS_PATH) + features["task_index"] = { + "dtype": "int64", + "shape": (1,), + "names": None, + } + + # Videos + if video_keys: + assert metadata_v1.get("video", False) + dataset = dataset.remove_columns(video_keys) + clean_gitattr = Path( + hub_api.hf_hub_download( + repo_id=GITATTRIBUTES_REF, repo_type="dataset", local_dir=local_dir, filename=".gitattributes" + ) + ).absolute() + with tempfile.TemporaryDirectory() as tmp_video_dir: + move_videos( + repo_id, video_keys, total_episodes, total_chunks, Path(tmp_video_dir), clean_gitattr, branch + ) + videos_info = get_videos_info(repo_id, v1x_dir, video_keys=video_keys, branch=branch) + for key in video_keys: + features[key]["shape"] = ( + videos_info[key].pop("video.height"), + videos_info[key].pop("video.width"), + videos_info[key].pop("video.channels"), + ) + features[key]["video_info"] = videos_info[key] + assert math.isclose(videos_info[key]["video.fps"], metadata_v1["fps"], rel_tol=1e-3) + if "encoding" in metadata_v1: + assert videos_info[key]["video.pix_fmt"] == metadata_v1["encoding"]["pix_fmt"] + else: + assert metadata_v1.get("video", 0) == 0 + videos_info = None + + # Split data into 1 parquet file by episode + episode_lengths = split_parquet_by_episodes(dataset, total_episodes, total_chunks, v20_dir) + + if robot_config is not None: + robot_type = robot_config["robot_type"] + repo_tags = [robot_type] + else: + robot_type = "unknown" + repo_tags = None + + # Episodes + episodes = [ + {"episode_index": ep_idx, "tasks": tasks_by_episodes[ep_idx], "length": episode_lengths[ep_idx]} + for ep_idx in episode_indices + ] + write_jsonlines(episodes, v20_dir / EPISODES_PATH) + + # Assemble metadata v2.0 + metadata_v2_0 = { + "codebase_version": V20, + "robot_type": robot_type, + "total_episodes": total_episodes, + "total_frames": len(dataset), + "total_tasks": len(tasks), + "total_videos": total_videos, + "total_chunks": total_chunks, + "chunks_size": DEFAULT_CHUNK_SIZE, + "fps": metadata_v1["fps"], + "splits": {"train": f"0:{total_episodes}"}, + "data_path": DEFAULT_PARQUET_PATH, + "video_path": 
DEFAULT_VIDEO_PATH if video_keys else None,
+        "features": features,
+    }
+    write_json(metadata_v2_0, v20_dir / INFO_PATH)
+    convert_stats_to_json(v1x_dir, v20_dir)
+    card = create_lerobot_dataset_card(tags=repo_tags, dataset_info=metadata_v2_0, **card_kwargs)
+
+    with contextlib.suppress(EntryNotFoundError, HfHubHTTPError):
+        hub_api.delete_folder(repo_id=repo_id, path_in_repo="data", repo_type="dataset", revision=branch)
+
+    with contextlib.suppress(EntryNotFoundError, HfHubHTTPError):
+        hub_api.delete_folder(repo_id=repo_id, path_in_repo="meta_data", repo_type="dataset", revision=branch)
+
+    with contextlib.suppress(EntryNotFoundError, HfHubHTTPError):
+        hub_api.delete_folder(repo_id=repo_id, path_in_repo="meta", repo_type="dataset", revision=branch)
+
+    hub_api.upload_folder(
+        repo_id=repo_id,
+        path_in_repo="data",
+        folder_path=v20_dir / "data",
+        repo_type="dataset",
+        revision=branch,
+    )
+    hub_api.upload_folder(
+        repo_id=repo_id,
+        path_in_repo="meta",
+        folder_path=v20_dir / "meta",
+        repo_type="dataset",
+        revision=branch,
+    )
+
+    card.push_to_hub(repo_id=repo_id, repo_type="dataset", revision=branch)
+
+    if not test_branch:
+        create_branch(repo_id=repo_id, branch=V20, repo_type="dataset")
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    task_args = parser.add_mutually_exclusive_group(required=True)
+
+    parser.add_argument(
+        "--repo-id",
+        type=str,
+        required=True,
+        help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset (e.g. `lerobot/pusht`, `cadene/aloha_sim_insertion_human`).",
+    )
+    task_args.add_argument(
+        "--single-task",
+        type=str,
+        help="A short but accurate description of the single task performed in the dataset.",
+    )
+    task_args.add_argument(
+        "--tasks-col",
+        type=str,
+        help="The name of the column containing language instructions",
+    )
+    task_args.add_argument(
+        "--tasks-path",
+        type=Path,
+        help="The path to a .json file containing one language instruction for each episode_index",
+    )
+    parser.add_argument(
+        "--robot-config",
+        type=Path,
+        default=None,
+        help="Path to the config yaml of the robot that was used to record the dataset.",
+    )
+    parser.add_argument(
+        "--robot-overrides",
+        type=str,
+        nargs="*",
+        help="Any key=value arguments to override the robot config values (use dots for nested=overrides)",
+    )
+    parser.add_argument(
+        "--local-dir",
+        type=Path,
+        default=None,
+        help="Local directory to store the dataset during conversion. Defaults to /tmp/lerobot_dataset_v2",
+    )
+    parser.add_argument(
+        "--license",
+        type=str,
+        default="apache-2.0",
+        help="Repo license. Must be one of https://huggingface.co/docs/hub/repositories-licenses. Defaults to apache-2.0.",
+    )
+    parser.add_argument(
+        "--test-branch",
+        type=str,
+        default=None,
+        help="Repo branch to test your conversion first (e.g. 'v2.0.test')",
+    )
+
+    args = parser.parse_args()
+    if not args.local_dir:
+        args.local_dir = Path("/tmp/lerobot_dataset_v2")
+
+    robot_config = parse_robot_config(args.robot_config, args.robot_overrides) if args.robot_config else None
+    del args.robot_config, args.robot_overrides
+
+    convert_dataset(**vars(args), robot_config=robot_config)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/lerobot/common/datasets/video_utils.py b/lerobot/common/datasets/video_utils.py
index 4d4ac6b0a..8ed3318dd 100644
--- a/lerobot/common/datasets/video_utils.py
+++ b/lerobot/common/datasets/video_utils.py
@@ -13,6 +13,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. +import json import logging import subprocess import warnings @@ -25,47 +26,11 @@ import torch import torchvision from datasets.features.features import register_feature - - -def load_from_videos( - item: dict[str, torch.Tensor], - video_frame_keys: list[str], - videos_dir: Path, - tolerance_s: float, - backend: str = "pyav", -): - """Note: When using data workers (e.g. DataLoader with num_workers>0), do not call this function - in the main process (e.g. by using a second Dataloader with num_workers=0). It will result in a Segmentation Fault. - This probably happens because a memory reference to the video loader is created in the main process and a - subprocess fails to access it. - """ - # since video path already contains "videos" (e.g. videos_dir="data/videos", path="videos/episode_0.mp4") - data_dir = videos_dir.parent - - for key in video_frame_keys: - if isinstance(item[key], list): - # load multiple frames at once (expected when delta_timestamps is not None) - timestamps = [frame["timestamp"] for frame in item[key]] - paths = [frame["path"] for frame in item[key]] - if len(set(paths)) > 1: - raise NotImplementedError("All video paths are expected to be the same for now.") - video_path = data_dir / paths[0] - - frames = decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend) - item[key] = frames - else: - # load one frame - timestamps = [item[key]["timestamp"]] - video_path = data_dir / item[key]["path"] - - frames = decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend) - item[key] = frames[0] - - return item +from PIL import Image def decode_video_frames_torchvision( - video_path: str, + video_path: Path | str, timestamps: list[float], tolerance_s: float, backend: str = "pyav", @@ -163,8 +128,8 @@ def decode_video_frames_torchvision( def encode_video_frames( - imgs_dir: Path, - video_path: Path, + imgs_dir: Path | str, + video_path: Path | str, fps: int, vcodec: str = "libsvtav1", pix_fmt: str = "yuv420p", @@ -247,3 +212,104 @@ def __call__(self): ) # to make VideoFrame available in HuggingFace `datasets` register_feature(VideoFrame, "VideoFrame") + + +def get_audio_info(video_path: Path | str) -> dict: + ffprobe_audio_cmd = [ + "ffprobe", + "-v", + "error", + "-select_streams", + "a:0", + "-show_entries", + "stream=channels,codec_name,bit_rate,sample_rate,bit_depth,channel_layout,duration", + "-of", + "json", + str(video_path), + ] + result = subprocess.run(ffprobe_audio_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + if result.returncode != 0: + raise RuntimeError(f"Error running ffprobe: {result.stderr}") + + info = json.loads(result.stdout) + audio_stream_info = info["streams"][0] if info.get("streams") else None + if audio_stream_info is None: + return {"has_audio": False} + + # Return the information, defaulting to None if no audio stream is present + return { + "has_audio": True, + "audio.channels": audio_stream_info.get("channels", None), + "audio.codec": audio_stream_info.get("codec_name", None), + "audio.bit_rate": int(audio_stream_info["bit_rate"]) if audio_stream_info.get("bit_rate") else None, + "audio.sample_rate": int(audio_stream_info["sample_rate"]) + if audio_stream_info.get("sample_rate") + else None, + "audio.bit_depth": audio_stream_info.get("bit_depth", None), + "audio.channel_layout": audio_stream_info.get("channel_layout", None), + } + + +def get_video_info(video_path: Path | str) -> dict: + 
ffprobe_video_cmd = [ + "ffprobe", + "-v", + "error", + "-select_streams", + "v:0", + "-show_entries", + "stream=r_frame_rate,width,height,codec_name,nb_frames,duration,pix_fmt", + "-of", + "json", + str(video_path), + ] + result = subprocess.run(ffprobe_video_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + if result.returncode != 0: + raise RuntimeError(f"Error running ffprobe: {result.stderr}") + + info = json.loads(result.stdout) + video_stream_info = info["streams"][0] + + # Calculate fps from r_frame_rate + r_frame_rate = video_stream_info["r_frame_rate"] + num, denom = map(int, r_frame_rate.split("/")) + fps = num / denom + + pixel_channels = get_video_pixel_channels(video_stream_info["pix_fmt"]) + + video_info = { + "video.fps": fps, + "video.height": video_stream_info["height"], + "video.width": video_stream_info["width"], + "video.channels": pixel_channels, + "video.codec": video_stream_info["codec_name"], + "video.pix_fmt": video_stream_info["pix_fmt"], + "video.is_depth_map": False, + **get_audio_info(video_path), + } + + return video_info + + +def get_video_pixel_channels(pix_fmt: str) -> int: + if "gray" in pix_fmt or "depth" in pix_fmt or "monochrome" in pix_fmt: + return 1 + elif "rgba" in pix_fmt or "yuva" in pix_fmt: + return 4 + elif "rgb" in pix_fmt or "yuv" in pix_fmt: + return 3 + else: + raise ValueError("Unknown format") + + +def get_image_pixel_channels(image: Image): + if image.mode == "L": + return 1 # Grayscale + elif image.mode == "LA": + return 2 # Grayscale + Alpha + elif image.mode == "RGB": + return 3 # RGB + elif image.mode == "RGBA": + return 4 # RGBA + else: + raise ValueError("Unknown format") diff --git a/lerobot/common/envs/factory.py b/lerobot/common/envs/factory.py index 54f24ea84..71e913cd9 100644 --- a/lerobot/common/envs/factory.py +++ b/lerobot/common/envs/factory.py @@ -19,7 +19,7 @@ from omegaconf import DictConfig -def make_env(cfg: DictConfig, n_envs: int | None = None) -> gym.vector.VectorEnv | None: +def make_env(cfg: DictConfig, n_envs: int | None = None, out_dir: str = "") -> gym.vector.VectorEnv | None: """Makes a gym vector environment according to the evaluation config. n_envs can be used to override eval.batch_size in the configuration. Must be at least 1. 
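Reviewer note: the new ffprobe-backed helpers added to `video_utils.py` above can be exercised on any local video outside the conversion script. A minimal sketch, assuming `ffprobe` is installed and using a hypothetical file path:

```python
from pathlib import Path

from lerobot.common.datasets.video_utils import get_video_info

# Hypothetical local file; substitute any mp4 produced by the dataset tooling.
video_path = Path("data/videos/observation.images.cam_high_episode_000000.mp4")

info = get_video_info(video_path)  # merges in the get_audio_info() fields as well
print(info["video.fps"], info["video.width"], info["video.height"], info["video.pix_fmt"])
if info["has_audio"]:
    print(info["audio.codec"], info["audio.sample_rate"])
```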
diff --git a/lerobot/common/logger.py b/lerobot/common/logger.py index bf578fcc5..06bc7eb65 100644 --- a/lerobot/common/logger.py +++ b/lerobot/common/logger.py @@ -189,7 +189,7 @@ def save_training_state( training_state["scheduler"] = scheduler.state_dict() torch.save(training_state, save_dir / self.training_state_file_name) - def save_checkpont( + def save_checkpoint( self, train_step: int, policy: Policy, @@ -208,6 +208,8 @@ def save_checkpont( checkpoint_dir / self.pretrained_model_dir_name, policy, wandb_artifact_name=wandb_artifact_name ) self.save_training_state(checkpoint_dir, train_step, optimizer, scheduler) + if self.last_checkpoint_dir.exists() or self.last_checkpoint_dir.is_symlink(): + self.last_checkpoint_dir.unlink() # Remove the existing symlink or file os.symlink(checkpoint_dir.absolute(), self.last_checkpoint_dir) def load_last_training_state(self, optimizer: Optimizer, scheduler: LRScheduler | None) -> int: diff --git a/lerobot/common/policies/diffusion/configuration_diffusion.py b/lerobot/common/policies/diffusion/configuration_diffusion.py index bd3692ace..531f49e4d 100644 --- a/lerobot/common/policies/diffusion/configuration_diffusion.py +++ b/lerobot/common/policies/diffusion/configuration_diffusion.py @@ -67,6 +67,7 @@ class DiffusionConfig: use_group_norm: Whether to replace batch normalization with group normalization in the backbone. The group sizes are set to be about 16 (to be precise, feature_dim // 16). spatial_softmax_num_keypoints: Number of keypoints for SpatialSoftmax. + use_separate_rgb_encoders_per_camera: Whether to use a separate RGB encoder for each camera view. down_dims: Feature dimension for each stage of temporal downsampling in the diffusion modeling Unet. You may provide a variable number of dimensions, therefore also controlling the degree of downsampling. @@ -130,6 +131,7 @@ class DiffusionConfig: pretrained_backbone_weights: str | None = None use_group_norm: bool = True spatial_softmax_num_keypoints: int = 32 + use_separate_rgb_encoder_per_camera: bool = False # Unet. down_dims: tuple[int, ...] = (512, 1024, 2048) kernel_size: int = 5 diff --git a/lerobot/common/policies/diffusion/modeling_diffusion.py b/lerobot/common/policies/diffusion/modeling_diffusion.py index 308a8be3c..9ba562600 100644 --- a/lerobot/common/policies/diffusion/modeling_diffusion.py +++ b/lerobot/common/policies/diffusion/modeling_diffusion.py @@ -182,8 +182,13 @@ def __init__(self, config: DiffusionConfig): self._use_env_state = False if num_images > 0: self._use_images = True - self.rgb_encoder = DiffusionRgbEncoder(config) - global_cond_dim += self.rgb_encoder.feature_dim * num_images + if self.config.use_separate_rgb_encoder_per_camera: + encoders = [DiffusionRgbEncoder(config) for _ in range(num_images)] + self.rgb_encoder = nn.ModuleList(encoders) + global_cond_dim += encoders[0].feature_dim * num_images + else: + self.rgb_encoder = DiffusionRgbEncoder(config) + global_cond_dim += self.rgb_encoder.feature_dim * num_images if "observation.environment_state" in config.input_shapes: self._use_env_state = True global_cond_dim += config.input_shapes["observation.environment_state"][0] @@ -239,16 +244,32 @@ def _prepare_global_conditioning(self, batch: dict[str, Tensor]) -> Tensor: """Encode image features and concatenate them all together along with the state vector.""" batch_size, n_obs_steps = batch["observation.state"].shape[:2] global_cond_feats = [batch["observation.state"]] - # Extract image feature (first combine batch, sequence, and camera index dims). 
+ # Extract image features. if self._use_images: - img_features = self.rgb_encoder( - einops.rearrange(batch["observation.images"], "b s n ... -> (b s n) ...") - ) - # Separate batch dim and sequence dim back out. The camera index dim gets absorbed into the - # feature dim (effectively concatenating the camera features). - img_features = einops.rearrange( - img_features, "(b s n) ... -> b s (n ...)", b=batch_size, s=n_obs_steps - ) + if self.config.use_separate_rgb_encoder_per_camera: + # Combine batch and sequence dims while rearranging to make the camera index dimension first. + images_per_camera = einops.rearrange(batch["observation.images"], "b s n ... -> n (b s) ...") + img_features_list = torch.cat( + [ + encoder(images) + for encoder, images in zip(self.rgb_encoder, images_per_camera, strict=True) + ] + ) + # Separate batch and sequence dims back out. The camera index dim gets absorbed into the + # feature dim (effectively concatenating the camera features). + img_features = einops.rearrange( + img_features_list, "(n b s) ... -> b s (n ...)", b=batch_size, s=n_obs_steps + ) + else: + # Combine batch, sequence, and "which camera" dims before passing to shared encoder. + img_features = self.rgb_encoder( + einops.rearrange(batch["observation.images"], "b s n ... -> (b s n) ...") + ) + # Separate batch dim and sequence dim back out. The camera index dim gets absorbed into the + # feature dim (effectively concatenating the camera features). + img_features = einops.rearrange( + img_features, "(b s n) ... -> b s (n ...)", b=batch_size, s=n_obs_steps + ) global_cond_feats.append(img_features) if self._use_env_state: diff --git a/lerobot/common/robot_devices/cameras/intelrealsense.py b/lerobot/common/robot_devices/cameras/intelrealsense.py index 4806bf785..84ac540f2 100644 --- a/lerobot/common/robot_devices/cameras/intelrealsense.py +++ b/lerobot/common/robot_devices/cameras/intelrealsense.py @@ -5,80 +5,99 @@ import argparse import concurrent.futures import logging +import math import shutil import threading import time import traceback +from collections import Counter from dataclasses import dataclass, replace from pathlib import Path from threading import Thread -import cv2 import numpy as np -import pyrealsense2 as rs from PIL import Image from lerobot.common.robot_devices.utils import ( RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError, + busy_wait, ) from lerobot.common.utils.utils import capture_timestamp_utc -from lerobot.scripts.control_robot import busy_wait SERIAL_NUMBER_INDEX = 1 -def find_camera_indices(raise_when_empty=True) -> list[int]: +def find_cameras(raise_when_empty=True, mock=False) -> list[dict]: """ - Find the serial numbers of the Intel RealSense cameras + Find the names and the serial numbers of the Intel RealSense cameras connected to the computer. """ - camera_ids = [] + if mock: + import tests.mock_pyrealsense2 as rs + else: + import pyrealsense2 as rs + + cameras = [] for device in rs.context().query_devices(): serial_number = int(device.get_info(rs.camera_info(SERIAL_NUMBER_INDEX))) - camera_ids.append(serial_number) + name = device.get_info(rs.camera_info.name) + cameras.append( + { + "serial_number": serial_number, + "name": name, + } + ) - if raise_when_empty and len(camera_ids) == 0: + if raise_when_empty and len(cameras) == 0: raise OSError( "Not a single camera was detected. Try re-plugging, or re-installing `librealsense` and its python wrapper `pyrealsense2`, or updating the firmware." 
) - return camera_ids + return cameras -def save_image(img_array, camera_idx, frame_index, images_dir): +def save_image(img_array, serial_number, frame_index, images_dir): try: img = Image.fromarray(img_array) - path = images_dir / f"camera_{camera_idx}_frame_{frame_index:06d}.png" + path = images_dir / f"camera_{serial_number}_frame_{frame_index:06d}.png" path.parent.mkdir(parents=True, exist_ok=True) img.save(str(path), quality=100) logging.info(f"Saved image: {path}") except Exception as e: - logging.error(f"Failed to save image for camera {camera_idx} frame {frame_index}: {e}") + logging.error(f"Failed to save image for camera {serial_number} frame {frame_index}: {e}") def save_images_from_cameras( images_dir: Path, - camera_ids: list[int] | None = None, + serial_numbers: list[int] | None = None, fps=None, width=None, height=None, record_time_s=2, + mock=False, ): """ Initializes all the cameras and saves images to the directory. Useful to visually identify the camera - associated to a given camera index. + associated to a given serial number. """ - if camera_ids is None: - camera_ids = find_camera_indices() + if serial_numbers is None or len(serial_numbers) == 0: + camera_infos = find_cameras(mock=mock) + serial_numbers = [cam["serial_number"] for cam in camera_infos] + + if mock: + import tests.mock_cv2 as cv2 + else: + import cv2 print("Connecting cameras") cameras = [] - for cam_idx in camera_ids: - camera = IntelRealSenseCamera(cam_idx, fps=fps, width=width, height=height) + for cam_sn in serial_numbers: + print(f"{cam_sn=}") + camera = IntelRealSenseCamera(cam_sn, fps=fps, width=width, height=height, mock=mock) camera.connect() print( - f"IntelRealSenseCamera({camera.camera_index}, fps={camera.fps}, width={camera.width}, height={camera.height}, color_mode={camera.color_mode})" + f"IntelRealSenseCamera({camera.serial_number}, fps={camera.fps}, width={camera.width}, height={camera.height}, color_mode={camera.color_mode})" ) cameras.append(camera) @@ -93,7 +112,7 @@ def save_images_from_cameras( frame_index = 0 start_time = time.perf_counter() try: - with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor: + with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: while True: now = time.perf_counter() @@ -103,12 +122,13 @@ def save_images_from_cameras( image = camera.read() if fps is None else camera.async_read() if image is None: print("No Frame") + bgr_converted_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) executor.submit( save_image, bgr_converted_image, - camera.camera_index, + camera.serial_number, frame_index, images_dir, ) @@ -140,6 +160,7 @@ class IntelRealSenseCameraConfig: IntelRealSenseCameraConfig(90, 640, 480) IntelRealSenseCameraConfig(30, 1280, 720) IntelRealSenseCameraConfig(30, 640, 480, use_depth=True) + IntelRealSenseCameraConfig(30, 640, 480, rotation=90) ``` """ @@ -147,8 +168,11 @@ class IntelRealSenseCameraConfig: width: int | None = None height: int | None = None color_mode: str = "rgb" + channels: int | None = None use_depth: bool = False force_hardware_reset: bool = True + rotation: int | None = None + mock: bool = False def __post_init__(self): if self.color_mode not in ["rgb", "bgr"]: @@ -156,19 +180,25 @@ def __post_init__(self): f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided." 
) - if (self.fps or self.width or self.height) and not (self.fps and self.width and self.height): + self.channels = 3 + + at_least_one_is_not_none = self.fps is not None or self.width is not None or self.height is not None + at_least_one_is_none = self.fps is None or self.width is None or self.height is None + if at_least_one_is_not_none and at_least_one_is_none: raise ValueError( "For `fps`, `width` and `height`, either all of them need to be set, or none of them, " f"but {self.fps=}, {self.width=}, {self.height=} were provided." ) + if self.rotation not in [-90, None, 90, 180]: + raise ValueError(f"`rotation` must be in [-90, None, 90, 180] (got {self.rotation})") + class IntelRealSenseCamera: """ The IntelRealSenseCamera class is similar to OpenCVCamera class but adds additional features for Intel Real Sense cameras: - - camera_index corresponds to the serial number of the camera, - - camera_index won't randomly change as it can be the case of OpenCVCamera for Linux, - - read is more reliable than OpenCVCamera, + - is instantiated with the serial number of the camera - won't randomly change as it can be the case of OpenCVCamera for Linux, + - can also be instantiated with the camera's name — if it's unique — using IntelRealSenseCamera.init_from_name(), - depth map can be returned. To find the camera indices of your cameras, you can run our utility script that will save a few frames for each camera: @@ -181,8 +211,10 @@ class IntelRealSenseCamera: Example of usage: ```python - camera_index = 128422271347 - camera = IntelRealSenseCamera(camera_index) + # Instantiate with its serial number + camera = IntelRealSenseCamera(128422271347) + # Or by its name if it's unique + camera = IntelRealSenseCamera.init_from_name("Intel RealSense D405") camera.connect() color_image = camera.read() # when done using the camera, consider disconnecting @@ -191,19 +223,19 @@ class IntelRealSenseCamera: Example of changing default fps, width, height and color_mode: ```python - camera = IntelRealSenseCamera(camera_index, fps=30, width=1280, height=720) + camera = IntelRealSenseCamera(serial_number, fps=30, width=1280, height=720) camera = connect() # applies the settings, might error out if these settings are not compatible with the camera - camera = IntelRealSenseCamera(camera_index, fps=90, width=640, height=480) + camera = IntelRealSenseCamera(serial_number, fps=90, width=640, height=480) camera = connect() - camera = IntelRealSenseCamera(camera_index, fps=90, width=640, height=480, color_mode="bgr") + camera = IntelRealSenseCamera(serial_number, fps=90, width=640, height=480, color_mode="bgr") camera = connect() ``` Example of returning depth: ```python - camera = IntelRealSenseCamera(camera_index, use_depth=True) + camera = IntelRealSenseCamera(serial_number, use_depth=True) camera.connect() color_image, depth_map = camera.read() ``` @@ -211,7 +243,7 @@ class IntelRealSenseCamera: def __init__( self, - camera_index: int, + serial_number: int, config: IntelRealSenseCameraConfig | None = None, **kwargs, ): @@ -221,13 +253,15 @@ def __init__( # Overwrite the config arguments using kwargs config = replace(config, **kwargs) - self.camera_index = camera_index + self.serial_number = serial_number self.fps = config.fps self.width = config.width self.height = config.height + self.channels = config.channels self.color_mode = config.color_mode self.use_depth = config.use_depth self.force_hardware_reset = config.force_hardware_reset + self.mock = config.mock self.camera = None self.is_connected = False @@ -237,14 
+271,55 @@ def __init__( self.depth_map = None self.logs = {} + if self.mock: + import tests.mock_cv2 as cv2 + else: + import cv2 + + # TODO(alibets): Do we keep original width/height or do we define them after rotation? + self.rotation = None + if config.rotation == -90: + self.rotation = cv2.ROTATE_90_COUNTERCLOCKWISE + elif config.rotation == 90: + self.rotation = cv2.ROTATE_90_CLOCKWISE + elif config.rotation == 180: + self.rotation = cv2.ROTATE_180 + + @classmethod + def init_from_name(cls, name: str, config: IntelRealSenseCameraConfig | None = None, **kwargs): + camera_infos = find_cameras() + camera_names = [cam["name"] for cam in camera_infos] + this_name_count = Counter(camera_names)[name] + if this_name_count > 1: + # TODO(aliberts): Test this with multiple identical cameras (Aloha) + raise ValueError( + f"Multiple {name} cameras have been detected. Please use their serial number to instantiate them." + ) + + name_to_serial_dict = {cam["name"]: cam["serial_number"] for cam in camera_infos} + cam_sn = name_to_serial_dict[name] + + if config is None: + config = IntelRealSenseCameraConfig() + + # Overwrite the config arguments using kwargs + config = replace(config, **kwargs) + + return cls(serial_number=cam_sn, config=config, **kwargs) + def connect(self): if self.is_connected: raise RobotDeviceAlreadyConnectedError( - f"IntelRealSenseCamera({self.camera_index}) is already connected." + f"IntelRealSenseCamera({self.serial_number}) is already connected." ) + if self.mock: + import tests.mock_pyrealsense2 as rs + else: + import pyrealsense2 as rs + config = rs.config() - config.enable_device(str(self.camera_index)) + config.enable_device(str(self.serial_number)) if self.fps and self.width and self.height: # TODO(rcadene): can we set rgb8 directly? @@ -260,7 +335,7 @@ def connect(self): self.camera = rs.pipeline() try: - self.camera.start(config) + profile = self.camera.start(config) is_camera_open = True except RuntimeError: is_camera_open = False @@ -269,15 +344,41 @@ def connect(self): # If the camera doesn't work, display the camera indices corresponding to # valid cameras. if not is_camera_open: - # Verify that the provided `camera_index` is valid before printing the traceback - available_cam_ids = find_camera_indices() - if self.camera_index not in available_cam_ids: + # Verify that the provided `serial_number` is valid before printing the traceback + camera_infos = find_cameras() + serial_numbers = [cam["serial_number"] for cam in camera_infos] + if self.serial_number not in serial_numbers: raise ValueError( - f"`camera_index` is expected to be one of these available cameras {available_cam_ids}, but {self.camera_index} is provided instead. " - "To find the camera index you should use, run `python lerobot/common/robot_devices/cameras/intelrealsense.py`." + f"`serial_number` is expected to be one of these available cameras {serial_numbers}, but {self.serial_number} is provided instead. " + "To find the serial number you should use, run `python lerobot/common/robot_devices/cameras/intelrealsense.py`." ) - raise OSError(f"Can't access IntelRealSenseCamera({self.camera_index}).") + raise OSError(f"Can't access IntelRealSenseCamera({self.serial_number}).") + + color_stream = profile.get_stream(rs.stream.color) + color_profile = color_stream.as_video_stream_profile() + actual_fps = color_profile.fps() + actual_width = color_profile.width() + actual_height = color_profile.height() + + # Using `math.isclose` since actual fps can be a float (e.g. 
29.9 instead of 30) + if self.fps is not None and not math.isclose(self.fps, actual_fps, rel_tol=1e-3): + # Using `OSError` since it's a broad that encompasses issues related to device communication + raise OSError( + f"Can't set {self.fps=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_fps}." + ) + if self.width is not None and self.width != actual_width: + raise OSError( + f"Can't set {self.width=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_width}." + ) + if self.height is not None and self.height != actual_height: + raise OSError( + f"Can't set {self.height=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_height}." + ) + + self.fps = round(actual_fps) + self.width = round(actual_width) + self.height = round(actual_height) self.is_connected = True @@ -293,9 +394,14 @@ def read(self, temporary_color: str | None = None) -> np.ndarray | tuple[np.ndar """ if not self.is_connected: raise RobotDeviceNotConnectedError( - f"IntelRealSenseCamera({self.camera_index}) is not connected. Try running `camera.connect()` first." + f"IntelRealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first." ) + if self.mock: + import tests.mock_cv2 as cv2 + else: + import cv2 + start_time = time.perf_counter() frame = self.camera.wait_for_frames(timeout_ms=5000) @@ -303,7 +409,7 @@ def read(self, temporary_color: str | None = None) -> np.ndarray | tuple[np.ndar color_frame = frame.get_color_frame() if not color_frame: - raise OSError(f"Can't capture color image from IntelRealSenseCamera({self.camera_index}).") + raise OSError(f"Can't capture color image from IntelRealSenseCamera({self.serial_number}).") color_image = np.asanyarray(color_frame.get_data()) @@ -323,6 +429,9 @@ def read(self, temporary_color: str | None = None) -> np.ndarray | tuple[np.ndar f"Can't capture color image with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead." ) + if self.rotation is not None: + color_image = cv2.rotate(color_image, self.rotation) + # log the number of seconds it took to read the image self.logs["delta_timestamp_s"] = time.perf_counter() - start_time @@ -332,7 +441,7 @@ def read(self, temporary_color: str | None = None) -> np.ndarray | tuple[np.ndar if self.use_depth: depth_frame = frame.get_depth_frame() if not depth_frame: - raise OSError(f"Can't capture depth image from IntelRealSenseCamera({self.camera_index}).") + raise OSError(f"Can't capture depth image from IntelRealSenseCamera({self.serial_number}).") depth_map = np.asanyarray(depth_frame.get_data()) @@ -342,12 +451,15 @@ def read(self, temporary_color: str | None = None) -> np.ndarray | tuple[np.ndar f"Can't capture depth map with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead." ) + if self.rotation is not None: + depth_map = cv2.rotate(depth_map, self.rotation) + return color_image, depth_map else: return color_image def read_loop(self): - while self.stop_event is None or not self.stop_event.is_set(): + while not self.stop_event.is_set(): if self.use_depth: self.color_image, self.depth_map = self.read() else: @@ -357,7 +469,7 @@ def async_read(self): """Access the latest color image""" if not self.is_connected: raise RobotDeviceNotConnectedError( - f"IntelRealSenseCamera({self.camera_index}) is not connected. Try running `camera.connect()` first." + f"IntelRealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first." 
) if self.thread is None: @@ -368,6 +480,7 @@ def async_read(self): num_tries = 0 while self.color_image is None: + # TODO(rcadene, aliberts): intelrealsense has diverged compared to opencv over here num_tries += 1 time.sleep(1 / self.fps) if num_tries > self.fps and (self.thread.ident is None or not self.thread.is_alive()): @@ -383,7 +496,7 @@ def async_read(self): def disconnect(self): if not self.is_connected: raise RobotDeviceNotConnectedError( - f"IntelRealSenseCamera({self.camera_index}) is not connected. Try running `camera.connect()` first." + f"IntelRealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first." ) if self.thread is not None and self.thread.is_alive(): @@ -408,11 +521,11 @@ def __del__(self): description="Save a few frames using `IntelRealSenseCamera` for all cameras connected to the computer, or a selected subset." ) parser.add_argument( - "--camera-ids", + "--serial-numbers", type=int, nargs="*", default=None, - help="List of camera indices used to instantiate the `IntelRealSenseCamera`. If not provided, find and use all available camera indices.", + help="List of serial numbers used to instantiate the `IntelRealSenseCamera`. If not provided, find and use all available camera indices.", ) parser.add_argument( "--fps", diff --git a/lerobot/common/robot_devices/cameras/opencv.py b/lerobot/common/robot_devices/cameras/opencv.py index b066a451a..d284cf55a 100644 --- a/lerobot/common/robot_devices/cameras/opencv.py +++ b/lerobot/common/robot_devices/cameras/opencv.py @@ -13,7 +13,6 @@ from pathlib import Path from threading import Thread -import cv2 import numpy as np from PIL import Image @@ -24,10 +23,6 @@ ) from lerobot.common.utils.utils import capture_timestamp_utc -# Use 1 thread to avoid blocking the main thread. Especially useful during data collection -# when other threads are used to save the images. -cv2.setNumThreads(1) - # The maximum opencv device index depends on your operating system. For instance, # if you have 3 cameras, they should be associated to index 0, 1, and 2. This is the case # on MacOS. However, on Ubuntu, the indices are different like 6, 16, 23. @@ -36,20 +31,44 @@ MAX_OPENCV_INDEX = 60 -def find_camera_indices(raise_when_empty=False, max_index_search_range=MAX_OPENCV_INDEX): +def find_cameras(raise_when_empty=False, max_index_search_range=MAX_OPENCV_INDEX, mock=False) -> list[dict]: + cameras = [] if platform.system() == "Linux": - # Linux uses camera ports print("Linux detected. Finding available camera indices through scanning '/dev/video*' ports") - possible_camera_ids = [] - for port in Path("/dev").glob("video*"): - camera_idx = int(str(port).replace("/dev/video", "")) - possible_camera_ids.append(camera_idx) + possible_ports = [str(port) for port in Path("/dev").glob("video*")] + ports = _find_cameras(possible_ports, mock=mock) + for port in ports: + cameras.append( + { + "port": port, + "index": int(port.removeprefix("/dev/video")), + } + ) else: print( "Mac or Windows detected. 
Finding available camera indices through " f"scanning all indices from 0 to {MAX_OPENCV_INDEX}" ) - possible_camera_ids = range(max_index_search_range) + possible_indices = range(max_index_search_range) + indices = _find_cameras(possible_indices, mock=mock) + for index in indices: + cameras.append( + { + "port": None, + "index": index, + } + ) + + return cameras + + +def _find_cameras( + possible_camera_ids: list[int | str], raise_when_empty=False, mock=False +) -> list[int | str]: + if mock: + import tests.mock_cv2 as cv2 + else: + import cv2 camera_ids = [] for camera_idx in possible_camera_ids: @@ -70,6 +89,16 @@ def find_camera_indices(raise_when_empty=False, max_index_search_range=MAX_OPENC return camera_ids +def is_valid_unix_path(path: str) -> bool: + """Note: if 'path' points to a symlink, this will return True only if the target exists""" + p = Path(path) + return p.is_absolute() and p.exists() + + +def get_camera_index_from_unix_port(port: Path) -> int: + return int(str(port.resolve()).removeprefix("/dev/video")) + + def save_image(img_array, camera_index, frame_index, images_dir): img = Image.fromarray(img_array) path = images_dir / f"camera_{camera_index:02d}_frame_{frame_index:06d}.png" @@ -78,19 +107,26 @@ def save_image(img_array, camera_index, frame_index, images_dir): def save_images_from_cameras( - images_dir: Path, camera_ids: list[int] | None = None, fps=None, width=None, height=None, record_time_s=2 + images_dir: Path, + camera_ids: list | None = None, + fps=None, + width=None, + height=None, + record_time_s=2, + mock=False, ): """ Initializes all the cameras and saves images to the directory. Useful to visually identify the camera associated to a given camera index. """ - if camera_ids is None: - camera_ids = find_camera_indices() + if camera_ids is None or len(camera_ids) == 0: + camera_infos = find_cameras(mock=mock) + camera_ids = [cam["index"] for cam in camera_infos] print("Connecting cameras") cameras = [] for cam_idx in camera_ids: - camera = OpenCVCamera(cam_idx, fps=fps, width=width, height=height) + camera = OpenCVCamera(cam_idx, fps=fps, width=width, height=height, mock=mock) camera.connect() print( f"OpenCVCamera({camera.camera_index}, fps={camera.fps}, width={camera.width}, " @@ -108,7 +144,7 @@ def save_images_from_cameras( print(f"Saving images to {images_dir}") frame_index = 0 start_time = time.perf_counter() - with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor: + with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: while True: now = time.perf_counter() @@ -129,11 +165,11 @@ def save_images_from_cameras( dt_s = time.perf_counter() - now busy_wait(1 / fps - dt_s) + print(f"Frame: {frame_index:04d}\tLatency (ms): {(time.perf_counter() - now) * 1000:.2f}") + if time.perf_counter() - start_time > record_time_s: break - print(f"Frame: {frame_index:04d}\tLatency (ms): {(time.perf_counter() - now) * 1000:.2f}") - frame_index += 1 print(f"Images have been saved to {images_dir}") @@ -156,6 +192,9 @@ class OpenCVCameraConfig: width: int | None = None height: int | None = None color_mode: str = "rgb" + channels: int | None = None + rotation: int | None = None + mock: bool = False def __post_init__(self): if self.color_mode not in ["rgb", "bgr"]: @@ -163,6 +202,11 @@ def __post_init__(self): f"`color_mode` is expected to be 'rgb' or 'bgr', but {self.color_mode} is provided." 
) + self.channels = 3 + + if self.rotation not in [-90, None, 90, 180]: + raise ValueError(f"`rotation` must be in [-90, None, 90, 180] (got {self.rotation})") + class OpenCVCamera: """ @@ -203,7 +247,7 @@ class OpenCVCamera: ``` """ - def __init__(self, camera_index: int, config: OpenCVCameraConfig | None = None, **kwargs): + def __init__(self, camera_index: int | str, config: OpenCVCameraConfig | None = None, **kwargs): if config is None: config = OpenCVCameraConfig() @@ -211,10 +255,25 @@ def __init__(self, camera_index: int, config: OpenCVCameraConfig | None = None, config = replace(config, **kwargs) self.camera_index = camera_index + self.port = None + + # Linux uses ports for connecting to cameras + if platform.system() == "Linux": + if isinstance(self.camera_index, int): + self.port = Path(f"/dev/video{self.camera_index}") + elif isinstance(self.camera_index, str) and is_valid_unix_path(self.camera_index): + self.port = Path(self.camera_index) + # Retrieve the camera index from a potentially symlinked path + self.camera_index = get_camera_index_from_unix_port(self.port) + else: + raise ValueError(f"Please check the provided camera_index: {camera_index}") + self.fps = config.fps self.width = config.width self.height = config.height + self.channels = config.channels self.color_mode = config.color_mode + self.mock = config.mock self.camera = None self.is_connected = False @@ -223,43 +282,60 @@ def __init__(self, camera_index: int, config: OpenCVCameraConfig | None = None, self.color_image = None self.logs = {} + if self.mock: + import tests.mock_cv2 as cv2 + else: + import cv2 + + # TODO(aliberts): Do we keep original width/height or do we define them after rotation? + self.rotation = None + if config.rotation == -90: + self.rotation = cv2.ROTATE_90_COUNTERCLOCKWISE + elif config.rotation == 90: + self.rotation = cv2.ROTATE_90_CLOCKWISE + elif config.rotation == 180: + self.rotation = cv2.ROTATE_180 + def connect(self): if self.is_connected: raise RobotDeviceAlreadyConnectedError(f"OpenCVCamera({self.camera_index}) is already connected.") - # First create a temporary camera trying to access `camera_index`, - # and verify it is a valid camera by calling `isOpened`. - - if platform.system() == "Linux": - # Linux uses ports for connecting to cameras - tmp_camera = cv2.VideoCapture(f"/dev/video{self.camera_index}") + if self.mock: + import tests.mock_cv2 as cv2 else: - tmp_camera = cv2.VideoCapture(self.camera_index) + import cv2 + # Use 1 thread to avoid blocking the main thread. Especially useful during data collection + # when other threads are used to save the images. + cv2.setNumThreads(1) + + camera_idx = f"/dev/video{self.camera_index}" if platform.system() == "Linux" else self.camera_index + # First create a temporary camera trying to access `camera_index`, + # and verify it is a valid camera by calling `isOpened`. + tmp_camera = cv2.VideoCapture(camera_idx) is_camera_open = tmp_camera.isOpened() # Release camera to make it accessible for `find_camera_indices` + tmp_camera.release() del tmp_camera # If the camera doesn't work, display the camera indices corresponding to # valid cameras. 
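As a reference for the Linux `camera_index` handling introduced above (an `int` index or a `/dev/video*` path, with symlinks resolved through `get_camera_index_from_unix_port`), here is a minimal standalone sketch; the `resolve_camera_index` helper is hypothetical and only restates the branch inlined in `OpenCVCamera.__init__`:

```python
from pathlib import Path


def resolve_camera_index(camera_index: int | str) -> int:
    """Accept either an integer index or a (possibly symlinked) /dev/video* path
    and return the underlying camera index, mirroring the Linux branch above."""
    if isinstance(camera_index, int):
        return camera_index
    port = Path(camera_index)
    # Same check as `is_valid_unix_path`: absolute path whose target exists.
    if not (port.is_absolute() and port.exists()):
        raise ValueError(f"Please check the provided camera_index: {camera_index}")
    # `Path.resolve()` follows symlinks, e.g. a udev rule mapping /dev/cam_front -> /dev/video6
    # (the symlink name here is only an illustrative assumption).
    return int(str(port.resolve()).removeprefix("/dev/video"))


# resolve_camera_index(4) -> 4
# resolve_camera_index("/dev/video6") -> 6 (assuming that device node exists)
```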
if not is_camera_open: # Verify that the provided `camera_index` is valid before printing the traceback - available_cam_ids = find_camera_indices() + cameras_info = find_cameras() + available_cam_ids = [cam["index"] for cam in cameras_info] if self.camera_index not in available_cam_ids: raise ValueError( f"`camera_index` is expected to be one of these available cameras {available_cam_ids}, but {self.camera_index} is provided instead. " "To find the camera index you should use, run `python lerobot/common/robot_devices/cameras/opencv.py`." ) - raise OSError(f"Can't access OpenCVCamera({self.camera_index}).") + raise OSError(f"Can't access OpenCVCamera({camera_idx}).") # Secondly, create the camera that will be used downstream. # Note: For some unknown reason, calling `isOpened` blocks the camera which then # needs to be re-created. - if platform.system() == "Linux": - self.camera = cv2.VideoCapture(f"/dev/video{self.camera_index}") - else: - self.camera = cv2.VideoCapture(self.camera_index) + self.camera = cv2.VideoCapture(camera_idx) if self.fps is not None: self.camera.set(cv2.CAP_PROP_FPS, self.fps) @@ -272,22 +348,24 @@ def connect(self): actual_width = self.camera.get(cv2.CAP_PROP_FRAME_WIDTH) actual_height = self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT) + # Using `math.isclose` since actual fps can be a float (e.g. 29.9 instead of 30) if self.fps is not None and not math.isclose(self.fps, actual_fps, rel_tol=1e-3): + # Using `OSError` since it's a broad that encompasses issues related to device communication raise OSError( f"Can't set {self.fps=} for OpenCVCamera({self.camera_index}). Actual value is {actual_fps}." ) - if self.width is not None and self.width != actual_width: + if self.width is not None and not math.isclose(self.width, actual_width, rel_tol=1e-3): raise OSError( f"Can't set {self.width=} for OpenCVCamera({self.camera_index}). Actual value is {actual_width}." ) - if self.height is not None and self.height != actual_height: + if self.height is not None and not math.isclose(self.height, actual_height, rel_tol=1e-3): raise OSError( f"Can't set {self.height=} for OpenCVCamera({self.camera_index}). Actual value is {actual_height}." ) - self.fps = actual_fps - self.width = actual_width - self.height = actual_height + self.fps = round(actual_fps) + self.width = round(actual_width) + self.height = round(actual_height) self.is_connected = True @@ -306,6 +384,7 @@ def read(self, temporary_color_mode: str | None = None) -> np.ndarray: start_time = time.perf_counter() ret, color_image = self.camera.read() + if not ret: raise OSError(f"Can't capture color image from camera {self.camera_index}.") @@ -320,6 +399,11 @@ def read(self, temporary_color_mode: str | None = None) -> np.ndarray: # However, Deep Learning framework such as LeRobot uses RGB format as default to train neural networks, # so we convert the image color from BGR to RGB. if requested_color_mode == "rgb": + if self.mock: + import tests.mock_cv2 as cv2 + else: + import cv2 + color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB) h, w, _ = color_image.shape @@ -328,17 +412,25 @@ def read(self, temporary_color_mode: str | None = None) -> np.ndarray: f"Can't capture color image with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead." 
) + if self.rotation is not None: + color_image = cv2.rotate(color_image, self.rotation) + # log the number of seconds it took to read the image self.logs["delta_timestamp_s"] = time.perf_counter() - start_time # log the utc time at which the image was received self.logs["timestamp_utc"] = capture_timestamp_utc() + self.color_image = color_image + return color_image def read_loop(self): - while self.stop_event is None or not self.stop_event.is_set(): - self.color_image = self.read() + while not self.stop_event.is_set(): + try: + self.color_image = self.read() + except Exception as e: + print(f"Error reading in thread: {e}") def async_read(self): if not self.is_connected: @@ -353,15 +445,14 @@ def async_read(self): self.thread.start() num_tries = 0 - while self.color_image is None: - num_tries += 1 - time.sleep(1 / self.fps) - if num_tries > self.fps and (self.thread.ident is None or not self.thread.is_alive()): - raise Exception( - "The thread responsible for `self.async_read()` took too much time to start. There might be an issue. Verify that `self.thread.start()` has been called." - ) + while True: + if self.color_image is not None: + return self.color_image - return self.color_image + time.sleep(1 / self.fps) + num_tries += 1 + if num_tries > self.fps * 2: + raise TimeoutError("Timed out waiting for async_read() to start.") def disconnect(self): if not self.is_connected: @@ -369,16 +460,14 @@ def disconnect(self): f"OpenCVCamera({self.camera_index}) is not connected. Try running `camera.connect()` first." ) - if self.thread is not None and self.thread.is_alive(): - # wait for the thread to finish + if self.thread is not None: self.stop_event.set() - self.thread.join() + self.thread.join() # wait for the thread to finish self.thread = None self.stop_event = None self.camera.release() self.camera = None - self.is_connected = False def __del__(self): @@ -424,7 +513,7 @@ def __del__(self): parser.add_argument( "--record-time-s", type=float, - default=2.0, + default=4.0, help="Set the number of seconds used to record the frames. 
By default, 2 seconds.", ) args = parser.parse_args() diff --git a/lerobot/common/robot_devices/cameras/utils.py b/lerobot/common/robot_devices/cameras/utils.py index 0f329d9fb..7904a57a5 100644 --- a/lerobot/common/robot_devices/cameras/utils.py +++ b/lerobot/common/robot_devices/cameras/utils.py @@ -1,55 +1,8 @@ -from pathlib import Path from typing import Protocol -import cv2 -import einops import numpy as np -def write_shape_on_image_inplace(image): - height, width = image.shape[:2] - text = f"Width: {width} Height: {height}" - - # Define the font, scale, color, and thickness - font = cv2.FONT_HERSHEY_SIMPLEX - font_scale = 1 - color = (255, 0, 0) # Blue in BGR - thickness = 2 - - position = (10, height - 10) # 10 pixels from the bottom-left corner - cv2.putText(image, text, position, font, font_scale, color, thickness) - - -def save_color_image(image, path, write_shape=False): - path = Path(path) - path.parent.mkdir(parents=True, exist_ok=True) - if write_shape: - write_shape_on_image_inplace(image) - cv2.imwrite(str(path), image) - - -def save_depth_image(depth, path, write_shape=False): - path = Path(path) - path.parent.mkdir(parents=True, exist_ok=True) - - # Apply colormap on depth image (image must be converted to 8-bit per pixel first) - depth_image = cv2.applyColorMap(cv2.convertScaleAbs(depth, alpha=0.03), cv2.COLORMAP_JET) - - if write_shape: - write_shape_on_image_inplace(depth_image) - cv2.imwrite(str(path), depth_image) - - -def convert_torch_image_to_cv2(tensor, rgb_to_bgr=True): - assert tensor.ndim == 3 - c, h, w = tensor.shape - assert c < h and c < w - color_image = einops.rearrange(tensor, "c h w -> h w c").numpy() - if rgb_to_bgr: - color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR) - return color_image - - # Defines a camera type class Camera(Protocol): def connect(self): ... 
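The reworked `OpenCVCamera.async_read()` above drops the thread-liveness heuristic in favor of a plain poll-until-ready loop bounded to roughly two seconds of frame periods. A minimal sketch of that pattern, assuming a hypothetical `get_frame` callable standing in for the background reader thread:

```python
import time


def wait_for_first_frame(get_frame, fps: float):
    """Poll `get_frame()` once per frame period until it returns something,
    giving up after ~2 seconds' worth of attempts (the `fps * 2` bound used above)."""
    num_tries = 0
    while True:
        frame = get_frame()
        if frame is not None:
            return frame
        time.sleep(1 / fps)
        num_tries += 1
        if num_tries > fps * 2:
            raise TimeoutError("Timed out waiting for async_read() to start.")
```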
diff --git a/lerobot/common/robot_devices/control_utils.py b/lerobot/common/robot_devices/control_utils.py new file mode 100644 index 000000000..9b9649dde --- /dev/null +++ b/lerobot/common/robot_devices/control_utils.py @@ -0,0 +1,363 @@ +######################################################################################## +# Utilities +######################################################################################## + + +import logging +import time +import traceback +from contextlib import nullcontext +from copy import copy +from functools import cache + +import cv2 +import torch +import tqdm +from deepdiff import DeepDiff +from termcolor import colored + +from lerobot.common.datasets.image_writer import safe_stop_image_writer +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset +from lerobot.common.datasets.utils import get_features_from_robot +from lerobot.common.policies.factory import make_policy +from lerobot.common.robot_devices.robots.utils import Robot +from lerobot.common.robot_devices.utils import busy_wait +from lerobot.common.utils.utils import get_safe_torch_device, init_hydra_config, set_global_seed +from lerobot.scripts.eval import get_pretrained_policy_path + + +def log_control_info(robot: Robot, dt_s, episode_index=None, frame_index=None, fps=None): + log_items = [] + if episode_index is not None: + log_items.append(f"ep:{episode_index}") + if frame_index is not None: + log_items.append(f"frame:{frame_index}") + + def log_dt(shortname, dt_val_s): + nonlocal log_items, fps + info_str = f"{shortname}:{dt_val_s * 1000:5.2f} ({1/ dt_val_s:3.1f}hz)" + if fps is not None: + actual_fps = 1 / dt_val_s + if actual_fps < fps - 1: + info_str = colored(info_str, "yellow") + log_items.append(info_str) + + # total step time displayed in milliseconds and its frequency + log_dt("dt", dt_s) + + # TODO(aliberts): move robot-specific logs logic in robot.print_logs() + if not robot.robot_type.startswith("stretch"): + for name in robot.leader_arms: + key = f"read_leader_{name}_pos_dt_s" + if key in robot.logs: + log_dt("dtRlead", robot.logs[key]) + + for name in robot.follower_arms: + key = f"write_follower_{name}_goal_pos_dt_s" + if key in robot.logs: + log_dt("dtWfoll", robot.logs[key]) + + key = f"read_follower_{name}_pos_dt_s" + if key in robot.logs: + log_dt("dtRfoll", robot.logs[key]) + + for name in robot.cameras: + key = f"read_camera_{name}_dt_s" + if key in robot.logs: + log_dt(f"dtR{name}", robot.logs[key]) + + info_str = " ".join(log_items) + logging.info(info_str) + + +@cache +def is_headless(): + """Detects if python is running without a monitor.""" + try: + import pynput # noqa + + return False + except Exception: + print( + "Error trying to import pynput. Switching to headless mode. " + "As a result, the video stream from the cameras won't be shown, " + "and you won't be able to change the control flow with keyboards. 
" + "For more info, see traceback below.\n" + ) + traceback.print_exc() + print() + return True + + +def has_method(_object: object, method_name: str): + return hasattr(_object, method_name) and callable(getattr(_object, method_name)) + + +def predict_action(observation, policy, device, use_amp): + observation = copy(observation) + with ( + torch.inference_mode(), + torch.autocast(device_type=device.type) if device.type == "cuda" and use_amp else nullcontext(), + ): + # Convert to pytorch format: channel first and float32 in [0,1] with batch dimension + for name in observation: + if "image" in name: + observation[name] = observation[name].type(torch.float32) / 255 + observation[name] = observation[name].permute(2, 0, 1).contiguous() + observation[name] = observation[name].unsqueeze(0) + observation[name] = observation[name].to(device) + + # Compute the next action with the policy + # based on the current observation + action = policy.select_action(observation) + + # Remove batch dimension + action = action.squeeze(0) + + # Move to cpu, if not already the case + action = action.to("cpu") + + return action + + +def init_keyboard_listener(): + # Allow to exit early while recording an episode or resetting the environment, + # by tapping the right arrow key '->'. This might require a sudo permission + # to allow your terminal to monitor keyboard events. + events = {} + events["exit_early"] = False + events["rerecord_episode"] = False + events["stop_recording"] = False + + if is_headless(): + logging.warning( + "Headless environment detected. On-screen cameras display and keyboard inputs will not be available." + ) + listener = None + return listener, events + + # Only import pynput if not in a headless environment + from pynput import keyboard + + def on_press(key): + try: + if key == keyboard.Key.right: + print("Right arrow key pressed. Exiting loop...") + events["exit_early"] = True + elif key == keyboard.Key.left: + print("Left arrow key pressed. Exiting loop and rerecord the last episode...") + events["rerecord_episode"] = True + events["exit_early"] = True + elif key == keyboard.Key.esc: + print("Escape key pressed. 
Stopping data recording...") + events["stop_recording"] = True + events["exit_early"] = True + except Exception as e: + print(f"Error handling key press: {e}") + + listener = keyboard.Listener(on_press=on_press) + listener.start() + + return listener, events + + +def init_policy(pretrained_policy_name_or_path, policy_overrides): + """Instantiate the policy and load fps, device and use_amp from config yaml""" + pretrained_policy_path = get_pretrained_policy_path(pretrained_policy_name_or_path) + hydra_cfg = init_hydra_config(pretrained_policy_path / "config.yaml", policy_overrides) + policy = make_policy(hydra_cfg=hydra_cfg, pretrained_policy_name_or_path=pretrained_policy_path) + + # Check device is available + device = get_safe_torch_device(hydra_cfg.device, log=True) + use_amp = hydra_cfg.use_amp + policy_fps = hydra_cfg.env.fps + + policy.eval() + policy.to(device) + + torch.backends.cudnn.benchmark = True + torch.backends.cuda.matmul.allow_tf32 = True + set_global_seed(hydra_cfg.seed) + return policy, policy_fps, device, use_amp + + +def warmup_record( + robot, + events, + enable_teloperation, + warmup_time_s, + display_cameras, + fps, +): + control_loop( + robot=robot, + control_time_s=warmup_time_s, + display_cameras=display_cameras, + events=events, + fps=fps, + teleoperate=enable_teloperation, + ) + + +def record_episode( + robot, + dataset, + events, + episode_time_s, + display_cameras, + policy, + device, + use_amp, + fps, +): + control_loop( + robot=robot, + control_time_s=episode_time_s, + display_cameras=display_cameras, + dataset=dataset, + events=events, + policy=policy, + device=device, + use_amp=use_amp, + fps=fps, + teleoperate=policy is None, + ) + + +@safe_stop_image_writer +def control_loop( + robot, + control_time_s=None, + teleoperate=False, + display_cameras=False, + dataset: LeRobotDataset | None = None, + events=None, + policy=None, + device=None, + use_amp=None, + fps=None, +): + # TODO(rcadene): Add option to record logs + if not robot.is_connected: + robot.connect() + + if events is None: + events = {"exit_early": False} + + if control_time_s is None: + control_time_s = float("inf") + + if teleoperate and policy is not None: + raise ValueError("When `teleoperate` is True, `policy` should be None.") + + if dataset is not None and fps is not None and dataset.fps != fps: + raise ValueError(f"The dataset fps should be equal to requested fps ({dataset['fps']} != {fps}).") + + timestamp = 0 + start_episode_t = time.perf_counter() + while timestamp < control_time_s: + start_loop_t = time.perf_counter() + + if teleoperate: + observation, action = robot.teleop_step(record_data=True) + else: + observation = robot.capture_observation() + + if policy is not None: + pred_action = predict_action(observation, policy, device, use_amp) + # Action can eventually be clipped using `max_relative_target`, + # so action actually sent is saved in the dataset. 
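Before the action is sent below, `predict_action` (defined above in control_utils.py) has already converted each camera image from HWC uint8 in [0, 255] to a batched, channel-first float32 tensor in [0, 1]. A quick standalone check of that conversion, using an assumed 480x640 dummy frame:

```python
import torch

# Dummy HWC uint8 camera frame (the shape is an assumption for illustration).
img = torch.randint(0, 255, (480, 640, 3), dtype=torch.uint8)

# Same steps as in `predict_action`: float32 in [0, 1], channel first, batch dim.
x = img.type(torch.float32) / 255
x = x.permute(2, 0, 1).contiguous().unsqueeze(0)

assert x.shape == (1, 3, 480, 640)
assert float(x.min()) >= 0.0 and float(x.max()) <= 1.0
```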
+ action = robot.send_action(pred_action) + action = {"action": action} + + if dataset is not None: + frame = {**observation, **action} + dataset.add_frame(frame) + + if display_cameras and not is_headless(): + image_keys = [key for key in observation if "image" in key] + for key in image_keys: + cv2.imshow(key, cv2.cvtColor(observation[key].numpy(), cv2.COLOR_RGB2BGR)) + cv2.waitKey(1) + + if fps is not None: + dt_s = time.perf_counter() - start_loop_t + busy_wait(1 / fps - dt_s) + + dt_s = time.perf_counter() - start_loop_t + log_control_info(robot, dt_s, fps=fps) + + timestamp = time.perf_counter() - start_episode_t + if events["exit_early"]: + events["exit_early"] = False + break + + +def reset_environment(robot, events, reset_time_s): + # TODO(rcadene): refactor warmup_record and reset_environment + # TODO(alibets): allow for teleop during reset + if has_method(robot, "teleop_safety_stop"): + robot.teleop_safety_stop() + + timestamp = 0 + start_vencod_t = time.perf_counter() + + # Wait if necessary + with tqdm.tqdm(total=reset_time_s, desc="Waiting") as pbar: + while timestamp < reset_time_s: + time.sleep(1) + timestamp = time.perf_counter() - start_vencod_t + pbar.update(1) + if events["exit_early"]: + events["exit_early"] = False + break + + +def stop_recording(robot, listener, display_cameras): + robot.disconnect() + + if not is_headless(): + if listener is not None: + listener.stop() + + if display_cameras: + cv2.destroyAllWindows() + + +def sanity_check_dataset_name(repo_id, policy): + _, dataset_name = repo_id.split("/") + # either repo_id doesnt start with "eval_" and there is no policy + # or repo_id starts with "eval_" and there is a policy + + # Check if dataset_name starts with "eval_" but policy is missing + if dataset_name.startswith("eval_") and policy is None: + raise ValueError( + f"Your dataset name begins with 'eval_' ({dataset_name}), but no policy is provided." + ) + + # Check if dataset_name does not start with "eval_" but policy is provided + if not dataset_name.startswith("eval_") and policy is not None: + raise ValueError( + f"Your dataset name does not begin with 'eval_' ({dataset_name}), but a policy is provided ({policy})." 
+ ) + + +def sanity_check_dataset_robot_compatibility( + dataset: LeRobotDataset, robot: Robot, fps: int, use_videos: bool +) -> None: + fields = [ + ("robot_type", dataset.meta.robot_type, robot.robot_type), + ("fps", dataset.fps, fps), + ("features", dataset.features, get_features_from_robot(robot, use_videos)), + ] + + mismatches = [] + for field, dataset_value, present_value in fields: + diff = DeepDiff(dataset_value, present_value, exclude_regex_paths=[r".*\['info'\]$"]) + if diff: + mismatches.append(f"{field}: expected {present_value}, got {dataset_value}") + + if mismatches: + raise ValueError( + "Dataset metadata compatibility check failed with mismatches:\n" + "\n".join(mismatches) + ) diff --git a/lerobot/common/robot_devices/motors/dynamixel.py b/lerobot/common/robot_devices/motors/dynamixel.py index 491963fed..1e1396f76 100644 --- a/lerobot/common/robot_devices/motors/dynamixel.py +++ b/lerobot/common/robot_devices/motors/dynamixel.py @@ -4,21 +4,9 @@ import time import traceback from copy import deepcopy -from pathlib import Path import numpy as np import tqdm -from dynamixel_sdk import ( - COMM_SUCCESS, - DXL_HIBYTE, - DXL_HIWORD, - DXL_LOBYTE, - DXL_LOWORD, - GroupSyncRead, - GroupSyncWrite, - PacketHandler, - PortHandler, -) from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError from lerobot.common.utils.utils import capture_timestamp_utc @@ -166,24 +154,29 @@ def convert_degrees_to_steps(degrees: float | np.ndarray, models: str | list[str return steps -def convert_to_bytes(value, bytes): +def convert_to_bytes(value, bytes, mock=False): + if mock: + return value + + import dynamixel_sdk as dxl + # Note: No need to convert back into unsigned int, since this byte preprocessing # already handles it for us. if bytes == 1: data = [ - DXL_LOBYTE(DXL_LOWORD(value)), + dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)), ] elif bytes == 2: data = [ - DXL_LOBYTE(DXL_LOWORD(value)), - DXL_HIBYTE(DXL_LOWORD(value)), + dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)), + dxl.DXL_HIBYTE(dxl.DXL_LOWORD(value)), ] elif bytes == 4: data = [ - DXL_LOBYTE(DXL_LOWORD(value)), - DXL_HIBYTE(DXL_LOWORD(value)), - DXL_LOBYTE(DXL_HIWORD(value)), - DXL_HIBYTE(DXL_HIWORD(value)), + dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)), + dxl.DXL_HIBYTE(dxl.DXL_LOWORD(value)), + dxl.DXL_LOBYTE(dxl.DXL_HIWORD(value)), + dxl.DXL_HIBYTE(dxl.DXL_HIWORD(value)), ] else: raise NotImplementedError( @@ -235,35 +228,6 @@ def assert_same_address(model_ctrl_table, motor_models, data_name): ) -def find_available_ports(): - ports = [] - for path in Path("/dev").glob("tty*"): - ports.append(str(path)) - return ports - - -def find_port(): - print("Finding all available ports for the DynamixelMotorsBus.") - ports_before = find_available_ports() - print(ports_before) - - print("Remove the usb cable from your DynamixelMotorsBus and press Enter when done.") - input() - - time.sleep(0.5) - ports_after = find_available_ports() - ports_diff = list(set(ports_before) - set(ports_after)) - - if len(ports_diff) == 1: - port = ports_diff[0] - print(f"The port of this DynamixelMotorsBus is '{port}'") - print("Reconnect the usb cable.") - elif len(ports_diff) == 0: - raise OSError(f"Could not detect the port. No difference was found ({ports_diff}).") - else: - raise OSError(f"Could not detect the port. More than one port was found ({ports_diff}).") - - class TorqueMode(enum.Enum): ENABLED = 1 DISABLED = 0 @@ -296,8 +260,8 @@ class DynamixelMotorsBus: A DynamixelMotorsBus instance requires a port (e.g. 
`DynamixelMotorsBus(port="/dev/tty.usbmodem575E0031751"`)). To find the port, you can run our utility script: ```bash - python lerobot/common/robot_devices/motors/dynamixel.py - >>> Finding all available ports for the DynamixelMotorsBus. + python lerobot/scripts/find_motors_bus_port.py + >>> Finding all available ports for the MotorBus. >>> ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751'] >>> Remove the usb cable from your DynamixelMotorsBus and press Enter when done. >>> The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751. @@ -333,9 +297,11 @@ def __init__( motors: dict[str, tuple[int, str]], extra_model_control_table: dict[str, list[tuple]] | None = None, extra_model_resolution: dict[str, int] | None = None, + mock=False, ): self.port = port self.motors = motors + self.mock = mock self.model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE) if extra_model_control_table: @@ -359,8 +325,13 @@ def connect(self): f"DynamixelMotorsBus({self.port}) is already connected. Do not call `motors_bus.connect()` twice." ) - self.port_handler = PortHandler(self.port) - self.packet_handler = PacketHandler(PROTOCOL_VERSION) + if self.mock: + import tests.mock_dynamixel_sdk as dxl + else: + import dynamixel_sdk as dxl + + self.port_handler = dxl.PortHandler(self.port) + self.packet_handler = dxl.PacketHandler(PROTOCOL_VERSION) try: if not self.port_handler.openPort(): @@ -368,7 +339,7 @@ def connect(self): except Exception: traceback.print_exc() print( - "\nTry running `python lerobot/common/robot_devices/motors/dynamixel.py` to make sure you are using the correct port.\n" + "\nTry running `python lerobot/scripts/find_motors_bus_port.py` to make sure you are using the correct port.\n" ) raise @@ -377,25 +348,18 @@ def connect(self): self.port_handler.setPacketTimeoutMillis(TIMEOUT_MS) - # Set expected baudrate for the bus - self.set_bus_baudrate(BAUDRATE) + def reconnect(self): + if self.mock: + import tests.mock_dynamixel_sdk as dxl + else: + import dynamixel_sdk as dxl - if not self.are_motors_configured(): - input( - "\n/!\\ A configuration issue has been detected with your motors: \n" - "If it's the first time that you use these motors, press enter to configure your motors... but before " - "verify that all the cables are connected the proper way. 
If you find an issue, before making a modification, " - "kill the python process, unplug the power cord to not damage the motors, rewire correctly, then plug the power " - "again and relaunch the script.\n" - ) - print() - self.configure_motors() + self.port_handler = dxl.PortHandler(self.port) + self.packet_handler = dxl.PacketHandler(PROTOCOL_VERSION) - def reconnect(self): - self.port_handler = PortHandler(self.port) - self.packet_handler = PacketHandler(PROTOCOL_VERSION) if not self.port_handler.openPort(): raise OSError(f"Failed to open port '{self.port}'.") + self.is_connected = True def are_motors_configured(self): @@ -407,120 +371,14 @@ def are_motors_configured(self): print(e) return False - def configure_motors(self): - # TODO(rcadene): This script assumes motors follow the X_SERIES baudrates - # TODO(rcadene): Refactor this function with intermediate high-level functions - - print("Scanning all baudrates and motor indices") - all_baudrates = set(X_SERIES_BAUDRATE_TABLE.values()) - ids_per_baudrate = {} - for baudrate in all_baudrates: - self.set_bus_baudrate(baudrate) - present_ids = self.find_motor_indices() - if len(present_ids) > 0: - ids_per_baudrate[baudrate] = present_ids - print(f"Motor indices detected: {ids_per_baudrate}") - print() - - possible_baudrates = list(ids_per_baudrate.keys()) - possible_ids = list({idx for sublist in ids_per_baudrate.values() for idx in sublist}) - untaken_ids = list(set(range(MAX_ID_RANGE)) - set(possible_ids) - set(self.motor_indices)) - - # Connect successively one motor to the chain and write a unique random index for each - for i in range(len(self.motors)): - self.disconnect() - input( - "1. Unplug the power cord\n" - "2. Plug/unplug minimal number of cables to only have the first " - f"{i+1} motor(s) ({self.motor_names[:i+1]}) connected.\n" - "3. Re-plug the power cord\n" - "Press Enter to continue..." - ) - print() - self.reconnect() - - if i > 0: - try: - self._read_with_motor_ids(self.motor_models, untaken_ids[:i], "ID") - except ConnectionError: - print(f"Failed to read from {untaken_ids[:i+1]}. Make sure the power cord is plugged in.") - input("Press Enter to continue...") - print() - self.reconnect() - - print("Scanning possible baudrates and motor indices") - motor_found = False - for baudrate in possible_baudrates: - self.set_bus_baudrate(baudrate) - present_ids = self.find_motor_indices(possible_ids) - if len(present_ids) == 1: - present_idx = present_ids[0] - print(f"Detected motor with index {present_idx}") - - if baudrate != BAUDRATE: - print(f"Setting its baudrate to {BAUDRATE}") - baudrate_idx = list(X_SERIES_BAUDRATE_TABLE.values()).index(BAUDRATE) - - # The write can fail, so we allow retries - for _ in range(NUM_WRITE_RETRY): - self._write_with_motor_ids( - self.motor_models, present_idx, "Baud_Rate", baudrate_idx - ) - time.sleep(0.5) - self.set_bus_baudrate(BAUDRATE) - try: - present_baudrate_idx = self._read_with_motor_ids( - self.motor_models, present_idx, "Baud_Rate" - ) - except ConnectionError: - print("Failed to write baudrate. 
Retrying.") - self.set_bus_baudrate(baudrate) - continue - break - else: - raise - - if present_baudrate_idx != baudrate_idx: - raise OSError("Failed to write baudrate.") - - print(f"Setting its index to a temporary untaken index ({untaken_ids[i]})") - self._write_with_motor_ids(self.motor_models, present_idx, "ID", untaken_ids[i]) - - present_idx = self._read_with_motor_ids(self.motor_models, untaken_ids[i], "ID") - if present_idx != untaken_ids[i]: - raise OSError("Failed to write index.") - - motor_found = True - break - elif len(present_ids) > 1: - raise OSError(f"More than one motor detected ({present_ids}), but only one was expected.") - - if not motor_found: - raise OSError( - "No motor found, but one new motor expected. Verify power cord is plugged in and retry." - ) - print() - - print(f"Setting expected motor indices: {self.motor_indices}") - self.set_bus_baudrate(BAUDRATE) - self._write_with_motor_ids( - self.motor_models, untaken_ids[: len(self.motors)], "ID", self.motor_indices - ) - print() - - if (self.read("ID") != self.motor_indices).any(): - raise OSError("Failed to write motors indices.") - - print("Configuration is done!") - - def find_motor_indices(self, possible_ids=None): + def find_motor_indices(self, possible_ids=None, num_retry=2): if possible_ids is None: possible_ids = range(MAX_ID_RANGE) indices = [] for idx in tqdm.tqdm(possible_ids): try: - present_idx = self._read_with_motor_ids(self.motor_models, [idx], "ID")[0] + present_idx = self.read_with_motor_ids(self.motor_models, [idx], "ID", num_retry=num_retry)[0] except ConnectionError: continue @@ -780,7 +638,12 @@ def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | values = np.round(values).astype(np.int32) return values - def _read_with_motor_ids(self, motor_models, motor_ids, data_name): + def read_with_motor_ids(self, motor_models, motor_ids, data_name, num_retry=NUM_READ_RETRY): + if self.mock: + import tests.mock_dynamixel_sdk as dxl + else: + import dynamixel_sdk as dxl + return_list = True if not isinstance(motor_ids, list): return_list = False @@ -788,12 +651,16 @@ def _read_with_motor_ids(self, motor_models, motor_ids, data_name): assert_same_address(self.model_ctrl_table, self.motor_models, data_name) addr, bytes = self.model_ctrl_table[motor_models[0]][data_name] - group = GroupSyncRead(self.port_handler, self.packet_handler, addr, bytes) + group = dxl.GroupSyncRead(self.port_handler, self.packet_handler, addr, bytes) for idx in motor_ids: group.addParam(idx) - comm = group.txRxPacket() - if comm != COMM_SUCCESS: + for _ in range(num_retry): + comm = group.txRxPacket() + if comm == dxl.COMM_SUCCESS: + break + + if comm != dxl.COMM_SUCCESS: raise ConnectionError( f"Read failed due to communication error on port {self.port_handler.port_name} for indices {motor_ids}: " f"{self.packet_handler.getTxRxResult(comm)}" @@ -817,6 +684,11 @@ def read(self, data_name, motor_names: str | list[str] | None = None): start_time = time.perf_counter() + if self.mock: + import tests.mock_dynamixel_sdk as dxl + else: + import dynamixel_sdk as dxl + if motor_names is None: motor_names = self.motor_names @@ -836,16 +708,18 @@ def read(self, data_name, motor_names: str | list[str] | None = None): if data_name not in self.group_readers: # create new group reader - self.group_readers[group_key] = GroupSyncRead(self.port_handler, self.packet_handler, addr, bytes) + self.group_readers[group_key] = dxl.GroupSyncRead( + self.port_handler, self.packet_handler, addr, bytes + ) for idx in motor_ids: 
self.group_readers[group_key].addParam(idx) for _ in range(NUM_READ_RETRY): comm = self.group_readers[group_key].txRxPacket() - if comm == COMM_SUCCESS: + if comm == dxl.COMM_SUCCESS: break - if comm != COMM_SUCCESS: + if comm != dxl.COMM_SUCCESS: raise ConnectionError( f"Read failed due to communication error on port {self.port} for group_key {group_key}: " f"{self.packet_handler.getTxRxResult(comm)}" @@ -875,7 +749,12 @@ def read(self, data_name, motor_names: str | list[str] | None = None): return values - def _write_with_motor_ids(self, motor_models, motor_ids, data_name, values): + def write_with_motor_ids(self, motor_models, motor_ids, data_name, values, num_retry=NUM_WRITE_RETRY): + if self.mock: + import tests.mock_dynamixel_sdk as dxl + else: + import dynamixel_sdk as dxl + if not isinstance(motor_ids, list): motor_ids = [motor_ids] if not isinstance(values, list): @@ -883,13 +762,17 @@ def _write_with_motor_ids(self, motor_models, motor_ids, data_name, values): assert_same_address(self.model_ctrl_table, motor_models, data_name) addr, bytes = self.model_ctrl_table[motor_models[0]][data_name] - group = GroupSyncWrite(self.port_handler, self.packet_handler, addr, bytes) + group = dxl.GroupSyncWrite(self.port_handler, self.packet_handler, addr, bytes) for idx, value in zip(motor_ids, values, strict=True): - data = convert_to_bytes(value, bytes) + data = convert_to_bytes(value, bytes, self.mock) group.addParam(idx, data) - comm = group.txPacket() - if comm != COMM_SUCCESS: + for _ in range(num_retry): + comm = group.txPacket() + if comm == dxl.COMM_SUCCESS: + break + + if comm != dxl.COMM_SUCCESS: raise ConnectionError( f"Write failed due to communication error on port {self.port_handler.port_name} for indices {motor_ids}: " f"{self.packet_handler.getTxRxResult(comm)}" @@ -903,6 +786,11 @@ def write(self, data_name, values: int | float | np.ndarray, motor_names: str | start_time = time.perf_counter() + if self.mock: + import tests.mock_dynamixel_sdk as dxl + else: + import dynamixel_sdk as dxl + if motor_names is None: motor_names = self.motor_names @@ -932,19 +820,19 @@ def write(self, data_name, values: int | float | np.ndarray, motor_names: str | init_group = data_name not in self.group_readers if init_group: - self.group_writers[group_key] = GroupSyncWrite( + self.group_writers[group_key] = dxl.GroupSyncWrite( self.port_handler, self.packet_handler, addr, bytes ) for idx, value in zip(motor_ids, values, strict=True): - data = convert_to_bytes(value, bytes) + data = convert_to_bytes(value, bytes, self.mock) if init_group: self.group_writers[group_key].addParam(idx, data) else: self.group_writers[group_key].changeParam(idx, data) comm = self.group_writers[group_key].txPacket() - if comm != COMM_SUCCESS: + if comm != dxl.COMM_SUCCESS: raise ConnectionError( f"Write failed due to communication error on port {self.port} for group_key {group_key}: " f"{self.packet_handler.getTxRxResult(comm)}" @@ -977,8 +865,3 @@ def disconnect(self): def __del__(self): if getattr(self, "is_connected", False): self.disconnect() - - -if __name__ == "__main__": - # Helper to find the usb port associated to all your DynamixelMotorsBus. 
- find_port() diff --git a/lerobot/common/robot_devices/motors/feetech.py b/lerobot/common/robot_devices/motors/feetech.py new file mode 100644 index 000000000..0d5480f7a --- /dev/null +++ b/lerobot/common/robot_devices/motors/feetech.py @@ -0,0 +1,887 @@ +import enum +import logging +import math +import time +import traceback +from copy import deepcopy + +import numpy as np +import tqdm + +from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError +from lerobot.common.utils.utils import capture_timestamp_utc + +PROTOCOL_VERSION = 0 +BAUDRATE = 1_000_000 +TIMEOUT_MS = 1000 + +MAX_ID_RANGE = 252 + +# The following bounds define the lower and upper joints range (after calibration). +# For joints in degree (i.e. revolute joints), their nominal range is [-180, 180] degrees +# which corresponds to a half rotation on the left and half rotation on the right. +# Some joints might require higher range, so we allow up to [-270, 270] degrees until +# an error is raised. +LOWER_BOUND_DEGREE = -270 +UPPER_BOUND_DEGREE = 270 +# For joints in percentage (i.e. joints that move linearly like the prismatic joint of a gripper), +# their nominal range is [0, 100] %. For instance, for Aloha gripper, 0% is fully +# closed, and 100% is fully open. To account for slight calibration issue, we allow up to +# [-10, 110] until an error is raised. +LOWER_BOUND_LINEAR = -10 +UPPER_BOUND_LINEAR = 110 + +HALF_TURN_DEGREE = 180 + + +# See this link for STS3215 Memory Table: +# https://docs.google.com/spreadsheets/d/1GVs7W1VS1PqdhA1nW-abeyAHhTUxKUdR/edit?usp=sharing&ouid=116566590112741600240&rtpof=true&sd=true +# data_name: (address, size_byte) +SCS_SERIES_CONTROL_TABLE = { + "Model": (3, 2), + "ID": (5, 1), + "Baud_Rate": (6, 1), + "Return_Delay": (7, 1), + "Response_Status_Level": (8, 1), + "Min_Angle_Limit": (9, 2), + "Max_Angle_Limit": (11, 2), + "Max_Temperature_Limit": (13, 1), + "Max_Voltage_Limit": (14, 1), + "Min_Voltage_Limit": (15, 1), + "Max_Torque_Limit": (16, 2), + "Phase": (18, 1), + "Unloading_Condition": (19, 1), + "LED_Alarm_Condition": (20, 1), + "P_Coefficient": (21, 1), + "D_Coefficient": (22, 1), + "I_Coefficient": (23, 1), + "Minimum_Startup_Force": (24, 2), + "CW_Dead_Zone": (26, 1), + "CCW_Dead_Zone": (27, 1), + "Protection_Current": (28, 2), + "Angular_Resolution": (30, 1), + "Offset": (31, 2), + "Mode": (33, 1), + "Protective_Torque": (34, 1), + "Protection_Time": (35, 1), + "Overload_Torque": (36, 1), + "Speed_closed_loop_P_proportional_coefficient": (37, 1), + "Over_Current_Protection_Time": (38, 1), + "Velocity_closed_loop_I_integral_coefficient": (39, 1), + "Torque_Enable": (40, 1), + "Acceleration": (41, 1), + "Goal_Position": (42, 2), + "Goal_Time": (44, 2), + "Goal_Speed": (46, 2), + "Torque_Limit": (48, 2), + "Lock": (55, 1), + "Present_Position": (56, 2), + "Present_Speed": (58, 2), + "Present_Load": (60, 2), + "Present_Voltage": (62, 1), + "Present_Temperature": (63, 1), + "Status": (65, 1), + "Moving": (66, 1), + "Present_Current": (69, 2), + # Not in the Memory Table + "Maximum_Acceleration": (85, 2), +} + +SCS_SERIES_BAUDRATE_TABLE = { + 0: 1_000_000, + 1: 500_000, + 2: 250_000, + 3: 128_000, + 4: 115_200, + 5: 57_600, + 6: 38_400, + 7: 19_200, +} + +CALIBRATION_REQUIRED = ["Goal_Position", "Present_Position"] +CONVERT_UINT32_TO_INT32_REQUIRED = ["Goal_Position", "Present_Position"] + + +MODEL_CONTROL_TABLE = { + "scs_series": SCS_SERIES_CONTROL_TABLE, + "sts3215": SCS_SERIES_CONTROL_TABLE, +} + +MODEL_RESOLUTION = { + "scs_series": 
4096, + "sts3215": 4096, +} + +MODEL_BAUDRATE_TABLE = { + "scs_series": SCS_SERIES_BAUDRATE_TABLE, + "sts3215": SCS_SERIES_BAUDRATE_TABLE, +} + +# High number of retries is needed for feetech compared to dynamixel motors. +NUM_READ_RETRY = 20 +NUM_WRITE_RETRY = 20 + + +def convert_degrees_to_steps(degrees: float | np.ndarray, models: str | list[str]) -> np.ndarray: + """This function converts the degree range to the step range for indicating motors rotation. + It assumes a motor achieves a full rotation by going from -180 degree position to +180. + The motor resolution (e.g. 4096) corresponds to the number of steps needed to achieve a full rotation. + """ + resolutions = [MODEL_RESOLUTION[model] for model in models] + steps = degrees / 180 * np.array(resolutions) / 2 + steps = steps.astype(int) + return steps + + +def convert_to_bytes(value, bytes, mock=False): + if mock: + return value + + import scservo_sdk as scs + + # Note: No need to convert back into unsigned int, since this byte preprocessing + # already handles it for us. + if bytes == 1: + data = [ + scs.SCS_LOBYTE(scs.SCS_LOWORD(value)), + ] + elif bytes == 2: + data = [ + scs.SCS_LOBYTE(scs.SCS_LOWORD(value)), + scs.SCS_HIBYTE(scs.SCS_LOWORD(value)), + ] + elif bytes == 4: + data = [ + scs.SCS_LOBYTE(scs.SCS_LOWORD(value)), + scs.SCS_HIBYTE(scs.SCS_LOWORD(value)), + scs.SCS_LOBYTE(scs.SCS_HIWORD(value)), + scs.SCS_HIBYTE(scs.SCS_HIWORD(value)), + ] + else: + raise NotImplementedError( + f"Value of the number of bytes to be sent is expected to be in [1, 2, 4], but " + f"{bytes} is provided instead." + ) + return data + + +def get_group_sync_key(data_name, motor_names): + group_key = f"{data_name}_" + "_".join(motor_names) + return group_key + + +def get_result_name(fn_name, data_name, motor_names): + group_key = get_group_sync_key(data_name, motor_names) + rslt_name = f"{fn_name}_{group_key}" + return rslt_name + + +def get_queue_name(fn_name, data_name, motor_names): + group_key = get_group_sync_key(data_name, motor_names) + queue_name = f"{fn_name}_{group_key}" + return queue_name + + +def get_log_name(var_name, fn_name, data_name, motor_names): + group_key = get_group_sync_key(data_name, motor_names) + log_name = f"{var_name}_{fn_name}_{group_key}" + return log_name + + +def assert_same_address(model_ctrl_table, motor_models, data_name): + all_addr = [] + all_bytes = [] + for model in motor_models: + addr, bytes = model_ctrl_table[model][data_name] + all_addr.append(addr) + all_bytes.append(bytes) + + if len(set(all_addr)) != 1: + raise NotImplementedError( + f"At least two motor models use a different address for `data_name`='{data_name}' ({list(zip(motor_models, all_addr, strict=False))}). Contact a LeRobot maintainer." + ) + + if len(set(all_bytes)) != 1: + raise NotImplementedError( + f"At least two motor models use a different bytes representation for `data_name`='{data_name}' ({list(zip(motor_models, all_bytes, strict=False))}). Contact a LeRobot maintainer." 
+ ) + + +class TorqueMode(enum.Enum): + ENABLED = 1 + DISABLED = 0 + + +class DriveMode(enum.Enum): + NON_INVERTED = 0 + INVERTED = 1 + + +class CalibrationMode(enum.Enum): + # Joints with rotational motions are expressed in degrees in nominal range of [-180, 180] + DEGREE = 0 + # Joints with linear motions (like gripper of Aloha) are experessed in nominal range of [0, 100] + LINEAR = 1 + + +class JointOutOfRangeError(Exception): + def __init__(self, message="Joint is out of range"): + self.message = message + super().__init__(self.message) + + +class FeetechMotorsBus: + """ + The FeetechMotorsBus class allows to efficiently read and write to the attached motors. It relies on + the python feetech sdk to communicate with the motors. For more info, see the [feetech SDK Documentation](https://emanual.robotis.com/docs/en/software/feetech/feetech_sdk/sample_code/python_read_write_protocol_2_0/#python-read-write-protocol-20). + + A FeetechMotorsBus instance requires a port (e.g. `FeetechMotorsBus(port="/dev/tty.usbmodem575E0031751"`)). + To find the port, you can run our utility script: + ```bash + python lerobot/scripts/find_motors_bus_port.py + >>> Finding all available ports for the MotorsBus. + >>> ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751'] + >>> Remove the usb cable from your FeetechMotorsBus and press Enter when done. + >>> The port of this FeetechMotorsBus is /dev/tty.usbmodem575E0031751. + >>> Reconnect the usb cable. + ``` + + Example of usage for 1 motor connected to the bus: + ```python + motor_name = "gripper" + motor_index = 6 + motor_model = "sts3215" + + motors_bus = FeetechMotorsBus( + port="/dev/tty.usbmodem575E0031751", + motors={motor_name: (motor_index, motor_model)}, + ) + motors_bus.connect() + + position = motors_bus.read("Present_Position") + + # move from a few motor steps as an example + few_steps = 30 + motors_bus.write("Goal_Position", position + few_steps) + + # when done, consider disconnecting + motors_bus.disconnect() + ``` + """ + + def __init__( + self, + port: str, + motors: dict[str, tuple[int, str]], + extra_model_control_table: dict[str, list[tuple]] | None = None, + extra_model_resolution: dict[str, int] | None = None, + mock=False, + ): + self.port = port + self.motors = motors + self.mock = mock + + self.model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE) + if extra_model_control_table: + self.model_ctrl_table.update(extra_model_control_table) + + self.model_resolution = deepcopy(MODEL_RESOLUTION) + if extra_model_resolution: + self.model_resolution.update(extra_model_resolution) + + self.port_handler = None + self.packet_handler = None + self.calibration = None + self.is_connected = False + self.group_readers = {} + self.group_writers = {} + self.logs = {} + + self.track_positions = {} + + def connect(self): + if self.is_connected: + raise RobotDeviceAlreadyConnectedError( + f"FeetechMotorsBus({self.port}) is already connected. Do not call `motors_bus.connect()` twice." 
+ ) + + if self.mock: + import tests.mock_scservo_sdk as scs + else: + import scservo_sdk as scs + + self.port_handler = scs.PortHandler(self.port) + self.packet_handler = scs.PacketHandler(PROTOCOL_VERSION) + + try: + if not self.port_handler.openPort(): + raise OSError(f"Failed to open port '{self.port}'.") + except Exception: + traceback.print_exc() + print( + "\nTry running `python lerobot/scripts/find_motors_bus_port.py` to make sure you are using the correct port.\n" + ) + raise + + # Allow to read and write + self.is_connected = True + + self.port_handler.setPacketTimeoutMillis(TIMEOUT_MS) + + def reconnect(self): + if self.mock: + import tests.mock_scservo_sdk as scs + else: + import scservo_sdk as scs + + self.port_handler = scs.PortHandler(self.port) + self.packet_handler = scs.PacketHandler(PROTOCOL_VERSION) + + if not self.port_handler.openPort(): + raise OSError(f"Failed to open port '{self.port}'.") + + self.is_connected = True + + def are_motors_configured(self): + # Only check the motor indices and not baudrate, since if the motor baudrates are incorrect, + # a ConnectionError will be raised anyway. + try: + return (self.motor_indices == self.read("ID")).all() + except ConnectionError as e: + print(e) + return False + + def find_motor_indices(self, possible_ids=None, num_retry=2): + if possible_ids is None: + possible_ids = range(MAX_ID_RANGE) + + indices = [] + for idx in tqdm.tqdm(possible_ids): + try: + present_idx = self.read_with_motor_ids(self.motor_models, [idx], "ID", num_retry=num_retry)[0] + except ConnectionError: + continue + + if idx != present_idx: + # sanity check + raise OSError( + "Motor index used to communicate through the bus is not the same as the one present in the motor memory. The motor memory might be damaged." + ) + indices.append(idx) + + return indices + + def set_bus_baudrate(self, baudrate): + present_bus_baudrate = self.port_handler.getBaudRate() + if present_bus_baudrate != baudrate: + print(f"Setting bus baud rate to {baudrate}. Previously {present_bus_baudrate}.") + self.port_handler.setBaudRate(baudrate) + + if self.port_handler.getBaudRate() != baudrate: + raise OSError("Failed to write bus baud rate.") + + @property + def motor_names(self) -> list[str]: + return list(self.motors.keys()) + + @property + def motor_models(self) -> list[str]: + return [model for _, model in self.motors.values()] + + @property + def motor_indices(self) -> list[int]: + return [idx for idx, _ in self.motors.values()] + + def set_calibration(self, calibration: dict[str, list]): + self.calibration = calibration + + def apply_calibration_autocorrect(self, values: np.ndarray | list, motor_names: list[str] | None): + """This function apply the calibration, automatically detects out of range errors for motors values and attempt to correct. + + For more info, see docstring of `apply_calibration` and `autocorrect_calibration`. + """ + try: + values = self.apply_calibration(values, motor_names) + except JointOutOfRangeError as e: + print(e) + self.autocorrect_calibration(values, motor_names) + values = self.apply_calibration(values, motor_names) + return values + + def apply_calibration(self, values: np.ndarray | list, motor_names: list[str] | None): + """Convert from unsigned int32 joint position range [0, 2**32[ to the universal float32 nominal degree range ]-180.0, 180.0[ with + a "zero position" at 0 degree. + + Note: We say "nominal degree range" since the motors can take values outside this range. 
For instance, a motor can report 190 degrees
+        if it rotates more than half a turn from the zero position. However, most motors can't rotate more than 180 degrees and will stay in this range.
+
+        Joint values are originally in [0, 2**32[ (unsigned int32). Each motor is expected to complete a full rotation
+        when given a goal position that is + or - its resolution. For instance, feetech sts3215 motors have a resolution of 4096, and
+        at any position in their original range, let's say the position 56734, they complete a full rotation clockwise by moving to 60830,
+        or anticlockwise by moving to 52638. The position in the original range is arbitrary and might differ a lot from one motor to another.
+        To harmonize between motors of the same model, different robots, or even models of different brands, we propose to work
+        in the centered nominal degree range ]-180, 180[.
+        """
+        if motor_names is None:
+            motor_names = self.motor_names
+
+        # Convert from unsigned int32 original range [0, 2**32] to signed float32 range
+        values = values.astype(np.float32)
+
+        for i, name in enumerate(motor_names):
+            calib_idx = self.calibration["motor_names"].index(name)
+            calib_mode = self.calibration["calib_mode"][calib_idx]
+
+            if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
+                drive_mode = self.calibration["drive_mode"][calib_idx]
+                homing_offset = self.calibration["homing_offset"][calib_idx]
+                _, model = self.motors[name]
+                resolution = self.model_resolution[model]
+
+                # Update direction of rotation of the motor to match between leader and follower.
+                # In fact, the motor of the leader for a given joint can be assembled in an
+                # opposite rotation direction compared to the motor of the follower on the same joint.
+                if drive_mode:
+                    values[i] *= -1
+
+                # Convert from range [-2**31, 2**31[ to
+                # nominal range ]-resolution, resolution[ (e.g. ]-2048, 2048[)
+                values[i] += homing_offset
+
+                # Convert from range ]-resolution, resolution[ to
+                # universal float32 centered degree range ]-180, 180[
+                values[i] = values[i] / (resolution // 2) * HALF_TURN_DEGREE
+
+                if (values[i] < LOWER_BOUND_DEGREE) or (values[i] > UPPER_BOUND_DEGREE):
+                    raise JointOutOfRangeError(
+                        f"Wrong motor position range detected for {name}. "
+                        f"Expected to be in nominal range of [-{HALF_TURN_DEGREE}, {HALF_TURN_DEGREE}] degrees (a full rotation), "
+                        f"with a maximum range of [{LOWER_BOUND_DEGREE}, {UPPER_BOUND_DEGREE}] degrees to account for joints that can rotate a bit more, "
+                        f"but present value is {values[i]} degrees. "
+                        "This might be due to a cable connection issue creating an artificial 360 degrees jump in motor values. "
+                        "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
+                    )
+
+            elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
+                start_pos = self.calibration["start_pos"][calib_idx]
+                end_pos = self.calibration["end_pos"][calib_idx]
+
+                # Rescale the present position to a nominal range [0, 100] %,
+                # useful for joints with linear motions like the Aloha gripper
+                values[i] = (values[i] - start_pos) / (end_pos - start_pos) * 100
+
+                if (values[i] < LOWER_BOUND_LINEAR) or (values[i] > UPPER_BOUND_LINEAR):
+                    raise JointOutOfRangeError(
+                        f"Wrong motor position range detected for {name}. "
+                        f"Expected to be in nominal range of [0, 100] % (a full linear translation), "
+                        f"with a maximum range of [{LOWER_BOUND_LINEAR}, {UPPER_BOUND_LINEAR}] % to account for some imprecision during calibration, "
+                        f"but present value is {values[i]} %. "
+                        "This might be due to a cable connection issue creating an artificial jump in motor values. "
+                        "You need to recalibrate by running: `python lerobot/scripts/control_robot.py calibrate`"
+                    )
+
+        return values
+
+    def autocorrect_calibration(self, values: np.ndarray | list, motor_names: list[str] | None):
+        """This function automatically detects issues with motor values after calibration, and corrects for them.
+
+        Some motors might have values outside of the expected maximum bounds after calibration.
+        For instance, the value of a joint expressed in degrees can be outside [-270, 270] degrees, which is totally unexpected given
+        a nominal range of [-180, 180] degrees, i.e. half a turn to the left or right starting from the zero position.
+
+        Known issues:
+        #1: The motor value randomly shifts by a full turn, caused by hardware/connection errors.
+        #2: The motor internal homing offset is shifted by a full turn, caused by using a default calibration (e.g. Aloha).
+        #3: The motor internal homing offset is shifted by less or more than a full turn, caused by using a default calibration
+            or by human error during manual calibration.
+
+        Issues #1 and #2 can be solved by shifting the calibration homing offset by a full turn.
+        Issue #3 will be visually detected by the user and potentially caught by the safety feature `max_relative_target`,
+        which will slow down the motor and raise an error asking the user to recalibrate. Recalibrating manually solves the issue.
+
+        Note: A full turn corresponds to 360 degrees but also to 4096 steps for a motor resolution of 4096.
+        """
+        if motor_names is None:
+            motor_names = self.motor_names
+
+        # Convert from unsigned int32 original range [0, 2**32] to signed float32 range
+        values = values.astype(np.float32)
+
+        for i, name in enumerate(motor_names):
+            calib_idx = self.calibration["motor_names"].index(name)
+            calib_mode = self.calibration["calib_mode"][calib_idx]
+
+            if CalibrationMode[calib_mode] == CalibrationMode.DEGREE:
+                drive_mode = self.calibration["drive_mode"][calib_idx]
+                homing_offset = self.calibration["homing_offset"][calib_idx]
+                _, model = self.motors[name]
+                resolution = self.model_resolution[model]
+
+                if drive_mode:
+                    values[i] *= -1
+
+                # Convert from initial range to range [-180, 180] degrees
+                calib_val = (values[i] + homing_offset) / (resolution // 2) * HALF_TURN_DEGREE
+                in_range = (calib_val > LOWER_BOUND_DEGREE) and (calib_val < UPPER_BOUND_DEGREE)
+
+                # Solve this inequality to find the factor to shift the range into [-180, 180] degrees
+                # values[i] = (values[i] + homing_offset + resolution * factor) / (resolution // 2) * HALF_TURN_DEGREE
+                # - HALF_TURN_DEGREE <= (values[i] + homing_offset + resolution * factor) / (resolution // 2) * HALF_TURN_DEGREE <= HALF_TURN_DEGREE
+                # (- HALF_TURN_DEGREE / HALF_TURN_DEGREE * (resolution // 2) - values[i] - homing_offset) / resolution <= factor <= (HALF_TURN_DEGREE / 180 * (resolution // 2) - values[i] - homing_offset) / resolution
+                low_factor = (
+                    -HALF_TURN_DEGREE / HALF_TURN_DEGREE * (resolution // 2) - values[i] - homing_offset
+                ) / resolution
+                upp_factor = (
+                    HALF_TURN_DEGREE / HALF_TURN_DEGREE * (resolution // 2) - values[i] - homing_offset
+                ) / resolution
+
+            elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR:
+                start_pos = self.calibration["start_pos"][calib_idx]
+                end_pos = self.calibration["end_pos"][calib_idx]
+
+                # Convert from initial range to range [0, 100] in %
+                calib_val = (values[i] - start_pos) / (end_pos - start_pos) * 100
+                in_range = (calib_val > LOWER_BOUND_LINEAR) and 
(calib_val < UPPER_BOUND_LINEAR) + + # Solve this inequality to find the factor to shift the range into [0, 100] % + # values[i] = (values[i] - start_pos + resolution * factor) / (end_pos + resolution * factor - start_pos - resolution * factor) * 100 + # values[i] = (values[i] - start_pos + resolution * factor) / (end_pos - start_pos) * 100 + # 0 <= (values[i] - start_pos + resolution * factor) / (end_pos - start_pos) * 100 <= 100 + # (start_pos - values[i]) / resolution <= factor <= (end_pos - values[i]) / resolution + low_factor = (start_pos - values[i]) / resolution + upp_factor = (end_pos - values[i]) / resolution + + if not in_range: + # Get first integer between the two bounds + if low_factor < upp_factor: + factor = math.ceil(low_factor) + + if factor > upp_factor: + raise ValueError(f"No integer found between bounds [{low_factor=}, {upp_factor=}]") + else: + factor = math.ceil(upp_factor) + + if factor > low_factor: + raise ValueError(f"No integer found between bounds [{low_factor=}, {upp_factor=}]") + + if CalibrationMode[calib_mode] == CalibrationMode.DEGREE: + out_of_range_str = f"{LOWER_BOUND_DEGREE} < {calib_val} < {UPPER_BOUND_DEGREE} degrees" + in_range_str = f"{LOWER_BOUND_DEGREE} < {calib_val} < {UPPER_BOUND_DEGREE} degrees" + elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR: + out_of_range_str = f"{LOWER_BOUND_LINEAR} < {calib_val} < {UPPER_BOUND_LINEAR} %" + in_range_str = f"{LOWER_BOUND_LINEAR} < {calib_val} < {UPPER_BOUND_LINEAR} %" + + logging.warning( + f"Auto-correct calibration of motor '{name}' by shifting value by {abs(factor)} full turns, " + f"from '{out_of_range_str}' to '{in_range_str}'." + ) + + # A full turn corresponds to 360 degrees but also to 4096 steps for a motor resolution of 4096. + self.calibration["homing_offset"][calib_idx] += resolution * factor + + def revert_calibration(self, values: np.ndarray | list, motor_names: list[str] | None): + """Inverse of `apply_calibration`.""" + if motor_names is None: + motor_names = self.motor_names + + for i, name in enumerate(motor_names): + calib_idx = self.calibration["motor_names"].index(name) + calib_mode = self.calibration["calib_mode"][calib_idx] + + if CalibrationMode[calib_mode] == CalibrationMode.DEGREE: + drive_mode = self.calibration["drive_mode"][calib_idx] + homing_offset = self.calibration["homing_offset"][calib_idx] + _, model = self.motors[name] + resolution = self.model_resolution[model] + + # Convert from nominal 0-centered degree range [-180, 180] to + # 0-centered resolution range (e.g. [-2048, 2048] for resolution=4096) + values[i] = values[i] / HALF_TURN_DEGREE * (resolution // 2) + + # Substract the homing offsets to come back to actual motor range of values + # which can be arbitrary. + values[i] -= homing_offset + + # Remove drive mode, which is the rotation direction of the motor, to come back to + # actual motor rotation direction which can be arbitrary. + if drive_mode: + values[i] *= -1 + + elif CalibrationMode[calib_mode] == CalibrationMode.LINEAR: + start_pos = self.calibration["start_pos"][calib_idx] + end_pos = self.calibration["end_pos"][calib_idx] + + # Convert from nominal lnear range of [0, 100] % to + # actual motor range of values which can be arbitrary. 
+ values[i] = values[i] / 100 * (end_pos - start_pos) + start_pos + + values = np.round(values).astype(np.int32) + return values + + def avoid_rotation_reset(self, values, motor_names, data_name): + if data_name not in self.track_positions: + self.track_positions[data_name] = { + "prev": [None] * len(self.motor_names), + # Assume False at initialization + "below_zero": [False] * len(self.motor_names), + "above_max": [False] * len(self.motor_names), + } + + track = self.track_positions[data_name] + + if motor_names is None: + motor_names = self.motor_names + + for i, name in enumerate(motor_names): + idx = self.motor_names.index(name) + + if track["prev"][idx] is None: + track["prev"][idx] = values[i] + continue + + # Detect a full rotation occured + if abs(track["prev"][idx] - values[i]) > 2048: + # Position went below 0 and got reset to 4095 + if track["prev"][idx] < values[i]: + # So we set negative value by adding a full rotation + values[i] -= 4096 + + # Position went above 4095 and got reset to 0 + elif track["prev"][idx] > values[i]: + # So we add a full rotation + values[i] += 4096 + + track["prev"][idx] = values[i] + + return values + + def read_with_motor_ids(self, motor_models, motor_ids, data_name, num_retry=NUM_READ_RETRY): + if self.mock: + import tests.mock_scservo_sdk as scs + else: + import scservo_sdk as scs + + return_list = True + if not isinstance(motor_ids, list): + return_list = False + motor_ids = [motor_ids] + + assert_same_address(self.model_ctrl_table, self.motor_models, data_name) + addr, bytes = self.model_ctrl_table[motor_models[0]][data_name] + group = scs.GroupSyncRead(self.port_handler, self.packet_handler, addr, bytes) + for idx in motor_ids: + group.addParam(idx) + + for _ in range(num_retry): + comm = group.txRxPacket() + if comm == scs.COMM_SUCCESS: + break + + if comm != scs.COMM_SUCCESS: + raise ConnectionError( + f"Read failed due to communication error on port {self.port_handler.port_name} for indices {motor_ids}: " + f"{self.packet_handler.getTxRxResult(comm)}" + ) + + values = [] + for idx in motor_ids: + value = group.getData(idx, addr, bytes) + values.append(value) + + if return_list: + return values + else: + return values[0] + + def read(self, data_name, motor_names: str | list[str] | None = None): + if self.mock: + import tests.mock_scservo_sdk as scs + else: + import scservo_sdk as scs + + if not self.is_connected: + raise RobotDeviceNotConnectedError( + f"FeetechMotorsBus({self.port}) is not connected. You need to run `motors_bus.connect()`." 
+ ) + + start_time = time.perf_counter() + + if motor_names is None: + motor_names = self.motor_names + + if isinstance(motor_names, str): + motor_names = [motor_names] + + motor_ids = [] + models = [] + for name in motor_names: + motor_idx, model = self.motors[name] + motor_ids.append(motor_idx) + models.append(model) + + assert_same_address(self.model_ctrl_table, models, data_name) + addr, bytes = self.model_ctrl_table[model][data_name] + group_key = get_group_sync_key(data_name, motor_names) + + if data_name not in self.group_readers: + # create new group reader + self.group_readers[group_key] = scs.GroupSyncRead( + self.port_handler, self.packet_handler, addr, bytes + ) + for idx in motor_ids: + self.group_readers[group_key].addParam(idx) + + for _ in range(NUM_READ_RETRY): + comm = self.group_readers[group_key].txRxPacket() + if comm == scs.COMM_SUCCESS: + break + + if comm != scs.COMM_SUCCESS: + raise ConnectionError( + f"Read failed due to communication error on port {self.port} for group_key {group_key}: " + f"{self.packet_handler.getTxRxResult(comm)}" + ) + + values = [] + for idx in motor_ids: + value = self.group_readers[group_key].getData(idx, addr, bytes) + values.append(value) + + values = np.array(values) + + # Convert to signed int to use range [-2048, 2048] for our motor positions. + if data_name in CONVERT_UINT32_TO_INT32_REQUIRED: + values = values.astype(np.int32) + + if data_name in CALIBRATION_REQUIRED: + values = self.avoid_rotation_reset(values, motor_names, data_name) + + if data_name in CALIBRATION_REQUIRED and self.calibration is not None: + values = self.apply_calibration_autocorrect(values, motor_names) + + # log the number of seconds it took to read the data from the motors + delta_ts_name = get_log_name("delta_timestamp_s", "read", data_name, motor_names) + self.logs[delta_ts_name] = time.perf_counter() - start_time + + # log the utc time at which the data was received + ts_utc_name = get_log_name("timestamp_utc", "read", data_name, motor_names) + self.logs[ts_utc_name] = capture_timestamp_utc() + + return values + + def write_with_motor_ids(self, motor_models, motor_ids, data_name, values, num_retry=NUM_WRITE_RETRY): + if self.mock: + import tests.mock_scservo_sdk as scs + else: + import scservo_sdk as scs + + if not isinstance(motor_ids, list): + motor_ids = [motor_ids] + if not isinstance(values, list): + values = [values] + + assert_same_address(self.model_ctrl_table, motor_models, data_name) + addr, bytes = self.model_ctrl_table[motor_models[0]][data_name] + group = scs.GroupSyncWrite(self.port_handler, self.packet_handler, addr, bytes) + for idx, value in zip(motor_ids, values, strict=True): + data = convert_to_bytes(value, bytes, self.mock) + group.addParam(idx, data) + + for _ in range(num_retry): + comm = group.txPacket() + if comm == scs.COMM_SUCCESS: + break + + if comm != scs.COMM_SUCCESS: + raise ConnectionError( + f"Write failed due to communication error on port {self.port_handler.port_name} for indices {motor_ids}: " + f"{self.packet_handler.getTxRxResult(comm)}" + ) + + def write(self, data_name, values: int | float | np.ndarray, motor_names: str | list[str] | None = None): + if not self.is_connected: + raise RobotDeviceNotConnectedError( + f"FeetechMotorsBus({self.port}) is not connected. You need to run `motors_bus.connect()`." 
+ ) + + start_time = time.perf_counter() + + if self.mock: + import tests.mock_scservo_sdk as scs + else: + import scservo_sdk as scs + + if motor_names is None: + motor_names = self.motor_names + + if isinstance(motor_names, str): + motor_names = [motor_names] + + if isinstance(values, (int, float, np.integer)): + values = [int(values)] * len(motor_names) + + values = np.array(values) + + motor_ids = [] + models = [] + for name in motor_names: + motor_idx, model = self.motors[name] + motor_ids.append(motor_idx) + models.append(model) + + if data_name in CALIBRATION_REQUIRED and self.calibration is not None: + values = self.revert_calibration(values, motor_names) + + values = values.tolist() + + assert_same_address(self.model_ctrl_table, models, data_name) + addr, bytes = self.model_ctrl_table[model][data_name] + group_key = get_group_sync_key(data_name, motor_names) + + init_group = data_name not in self.group_readers + if init_group: + self.group_writers[group_key] = scs.GroupSyncWrite( + self.port_handler, self.packet_handler, addr, bytes + ) + + for idx, value in zip(motor_ids, values, strict=True): + data = convert_to_bytes(value, bytes, self.mock) + if init_group: + self.group_writers[group_key].addParam(idx, data) + else: + self.group_writers[group_key].changeParam(idx, data) + + comm = self.group_writers[group_key].txPacket() + if comm != scs.COMM_SUCCESS: + raise ConnectionError( + f"Write failed due to communication error on port {self.port} for group_key {group_key}: " + f"{self.packet_handler.getTxRxResult(comm)}" + ) + + # log the number of seconds it took to write the data to the motors + delta_ts_name = get_log_name("delta_timestamp_s", "write", data_name, motor_names) + self.logs[delta_ts_name] = time.perf_counter() - start_time + + # TODO(rcadene): should we log the time before sending the write command? + # log the utc time when the write has been completed + ts_utc_name = get_log_name("timestamp_utc", "write", data_name, motor_names) + self.logs[ts_utc_name] = capture_timestamp_utc() + + def disconnect(self): + if not self.is_connected: + raise RobotDeviceNotConnectedError( + f"FeetechMotorsBus({self.port}) is not connected. Try running `motors_bus.connect()` first." + ) + + if self.port_handler is not None: + self.port_handler.closePort() + self.port_handler = None + + self.packet_handler = None + self.group_readers = {} + self.group_writers = {} + self.is_connected = False + + def __del__(self): + if getattr(self, "is_connected", False): + self.disconnect() diff --git a/lerobot/common/robot_devices/robots/dynamixel_calibration.py b/lerobot/common/robot_devices/robots/dynamixel_calibration.py new file mode 100644 index 000000000..5c4932d2e --- /dev/null +++ b/lerobot/common/robot_devices/robots/dynamixel_calibration.py @@ -0,0 +1,130 @@ +"""Logic to calibrate a robot arm built with dynamixel motors""" +# TODO(rcadene, aliberts): move this logic into the robot code when refactoring + +import numpy as np + +from lerobot.common.robot_devices.motors.dynamixel import ( + CalibrationMode, + TorqueMode, + convert_degrees_to_steps, +) +from lerobot.common.robot_devices.motors.utils import MotorsBus + +URL_TEMPLATE = ( + "https://raw.githubusercontent.com/huggingface/lerobot/main/media/{robot}/{arm}_{position}.webp" +) + +# The following positions are provided in nominal degree range ]-180, +180[ +# For more info on these constants, see comments in the code where they get used. 
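The two constants that follow feed into `convert_degrees_to_steps`, which is imported above from the dynamixel motor module. As a rough illustration of what that conversion amounts to (the helper name and body below are an approximation written for this note, not code from the PR; it assumes a full 360-degree turn maps onto the motor resolution):

```python
import numpy as np

# Illustrative sketch only: approximate behavior of the degrees-to-steps conversion
# used by the calibration scripts. A full turn (360 degrees) corresponds to `resolution`
# steps, so 90 degrees on a 4096-step motor is 1024 steps.
def degrees_to_steps_sketch(degrees: float, resolutions: list[int]) -> np.ndarray:
    return (degrees / 180 * np.array(resolutions) / 2).astype(int)

print(degrees_to_steps_sketch(90, [4096, 4096]))  # [1024 1024]
print(degrees_to_steps_sketch(0, [4096, 4096]))   # [0 0]
```

With this mapping, `ROTATED_POSITION_DEGREE = 90` corresponds to a quarter of the motor resolution in steps, which is the rounding unit used by `compute_nearest_rounded_position` further down.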
+ZERO_POSITION_DEGREE = 0
+ROTATED_POSITION_DEGREE = 90
+
+
+def assert_drive_mode(drive_mode):
+    # `drive_mode` is in [0, 1], where 0 means original rotation direction for the motor and 1 means inverted.
+    if not np.all(np.isin(drive_mode, [0, 1])):
+        raise ValueError(f"`drive_mode` contains values other than 0 or 1: ({drive_mode})")
+
+
+def apply_drive_mode(position, drive_mode):
+    assert_drive_mode(drive_mode)
+    # Convert `drive_mode` from [0, 1], where 0 indicates the original rotation direction and 1 the inverted one,
+    # to [-1, 1], where 1 indicates the original rotation direction and -1 the inverted one.
+    signed_drive_mode = -(drive_mode * 2 - 1)
+    position *= signed_drive_mode
+    return position
+
+
+def compute_nearest_rounded_position(position, models):
+    delta_turn = convert_degrees_to_steps(ROTATED_POSITION_DEGREE, models)
+    nearest_pos = np.round(position.astype(float) / delta_turn) * delta_turn
+    return nearest_pos.astype(position.dtype)
+
+
+def run_arm_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str):
+    """This function ensures that a neural network trained on data collected on a given robot
+    can work on another robot. For instance, before calibration, setting the same goal position
+    for each motor of two different robots will result in two very different positions. But after calibration,
+    the two robots will move to the same position. To this end, this function computes the homing offset
+    and the drive mode for each motor of a given robot.
+
+    Homing offset is used to shift the motor position into a ]-2048, +2048[ nominal range (when the motor uses 2048 steps
+    to complete half a turn). This range is set around an arbitrary "zero position" corresponding to all motor positions
+    being 0. During the calibration process, you will need to manually move the robot to this "zero position".
+
+    Drive mode is used to invert the rotation direction of the motor. This is useful when some motors have been assembled
+    in the opposite orientation for some robots. During the calibration process, you will need to manually move the robot
+    to the "rotated position".
+
+    After calibration, the homing offsets and drive modes are stored in a cache.
+
+    Example of usage:
+    ```python
+    run_arm_calibration(arm, "koch", "left", "follower")
+    ```
+    """
+    if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
+        raise ValueError("To run calibration, the torque must be disabled on all motors.")
+
+    print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")
+
+    print("\nMove arm to zero position")
+    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="zero"))
+    input("Press Enter to continue...")
+
+    # We arbitrarily chose our zero target position to be a straight horizontal position with the gripper upwards and closed.
+    # It is easy to identify and all motors are in a "quarter turn" position. Once calibration is done, this position will
+    # correspond to every motor angle being 0. If you set all 0 as Goal Position, the arm will move to this position.
+    zero_target_pos = convert_degrees_to_steps(ZERO_POSITION_DEGREE, arm.motor_models)
+
+    # Compute homing offset so that `present_position + homing_offset ~= target_position`.
+    zero_pos = arm.read("Present_Position")
+    zero_nearest_pos = compute_nearest_rounded_position(zero_pos, arm.motor_models)
+    homing_offset = zero_target_pos - zero_nearest_pos
+
+    # The rotated target position corresponds to a rotation of a quarter turn from the zero position.
+    # This allows us to identify the rotation direction of each motor.
+    # For instance, if a motor rotates by 90 degrees and its value is -90 after applying the homing offset, then we know its rotation direction
+    # is inverted. However, for the calibration to be successful, every motor needs to follow the same target position.
+    # Sometimes, there is only one possible rotation direction. For instance, if the gripper is closed, there is only one direction which
+    # corresponds to opening the gripper. When the rotation direction is ambiguous, we arbitrarily rotate clockwise from the point of view
+    # of the previous motor in the kinematic chain.
+    print("\nMove arm to rotated target position")
+    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rotated"))
+    input("Press Enter to continue...")
+
+    rotated_target_pos = convert_degrees_to_steps(ROTATED_POSITION_DEGREE, arm.motor_models)
+
+    # Find drive mode by rotating each motor by a quarter of a turn.
+    # Drive mode indicates if the motor rotation direction should be inverted (=1) or not (=0).
+    rotated_pos = arm.read("Present_Position")
+    drive_mode = (rotated_pos < zero_pos).astype(np.int32)
+
+    # Re-compute homing offset to take into account drive mode
+    rotated_drived_pos = apply_drive_mode(rotated_pos, drive_mode)
+    rotated_nearest_pos = compute_nearest_rounded_position(rotated_drived_pos, arm.motor_models)
+    homing_offset = rotated_target_pos - rotated_nearest_pos
+
+    print("\nMove arm to rest position")
+    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rest"))
+    input("Press Enter to continue...")
+    print()
+
+    # Joints with rotational motions are expressed in degrees in a nominal range of [-180, 180]
+    calib_mode = [CalibrationMode.DEGREE.name] * len(arm.motor_names)
+
+    # TODO(rcadene): make type of joints (DEGREE or LINEAR) configurable from yaml?
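Before the gripper-specific branch that follows, here is a hedged sketch of the calibration dictionary this function assembles and returns, and which the robot class later serializes to a JSON calibration file. Motor names and numbers are illustrative only (a hypothetical 6-motor arm whose gripper is treated as a LINEAR joint), not values read from a real arm:

```python
# Illustrative shape of the returned calibration data (made-up values).
example_calib_data = {
    # Offsets (in motor steps) that shift raw positions toward the nominal range.
    "homing_offset": [-2048, 1024, -3072, -1024, 2048, -1024],
    # 1 means the motor is assembled in the inverted rotation direction, 0 otherwise.
    "drive_mode": [0, 1, 0, 0, 1, 0],
    # Raw positions read at the "zero position" and at the "rotated position".
    "start_pos": [2048, 1024, 3000, 1500, 2100, 2010],
    "end_pos": [3072, 0, 4024, 2524, 1076, 2450],
    # Per-joint interpretation: DEGREE for rotational joints, LINEAR for e.g. a gripper.
    "calib_mode": ["DEGREE", "DEGREE", "DEGREE", "DEGREE", "DEGREE", "LINEAR"],
    "motor_names": ["shoulder_pan", "shoulder_lift", "elbow_flex", "wrist_flex", "wrist_roll", "gripper"],
}
```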
+ if robot_type in ["aloha"] and "gripper" in arm.motor_names: + # Joints with linear motions (like gripper of Aloha) are experessed in nominal range of [0, 100] + calib_idx = arm.motor_names.index("gripper") + calib_mode[calib_idx] = CalibrationMode.LINEAR.name + + calib_data = { + "homing_offset": homing_offset.tolist(), + "drive_mode": drive_mode.tolist(), + "start_pos": zero_pos.tolist(), + "end_pos": rotated_pos.tolist(), + "calib_mode": calib_mode, + "motor_names": arm.motor_names, + } + return calib_data diff --git a/lerobot/common/robot_devices/robots/factory.py b/lerobot/common/robot_devices/robots/factory.py index 2edcd2925..17e8e5e6a 100644 --- a/lerobot/common/robot_devices/robots/factory.py +++ b/lerobot/common/robot_devices/robots/factory.py @@ -1,7 +1,9 @@ import hydra from omegaconf import DictConfig +from lerobot.common.robot_devices.robots.utils import Robot -def make_robot(cfg: DictConfig): + +def make_robot(cfg: DictConfig) -> Robot: robot = hydra.utils.instantiate(cfg) return robot diff --git a/lerobot/common/robot_devices/robots/feetech_calibration.py b/lerobot/common/robot_devices/robots/feetech_calibration.py new file mode 100644 index 000000000..b015951a0 --- /dev/null +++ b/lerobot/common/robot_devices/robots/feetech_calibration.py @@ -0,0 +1,484 @@ +"""Logic to calibrate a robot arm built with feetech motors""" +# TODO(rcadene, aliberts): move this logic into the robot code when refactoring + +import time + +import numpy as np + +from lerobot.common.robot_devices.motors.feetech import ( + CalibrationMode, + TorqueMode, + convert_degrees_to_steps, +) +from lerobot.common.robot_devices.motors.utils import MotorsBus + +URL_TEMPLATE = ( + "https://raw.githubusercontent.com/huggingface/lerobot/main/media/{robot}/{arm}_{position}.webp" +) + +# The following positions are provided in nominal degree range ]-180, +180[ +# For more info on these constants, see comments in the code where they get used. +ZERO_POSITION_DEGREE = 0 +ROTATED_POSITION_DEGREE = 90 + + +def assert_drive_mode(drive_mode): + # `drive_mode` is in [0,1] with 0 means original rotation direction for the motor, and 1 means inverted. + if not np.all(np.isin(drive_mode, [0, 1])): + raise ValueError(f"`drive_mode` contains values other than 0 or 1: ({drive_mode})") + + +def apply_drive_mode(position, drive_mode): + assert_drive_mode(drive_mode) + # Convert `drive_mode` from [0, 1] with 0 indicates original rotation direction and 1 inverted, + # to [-1, 1] with 1 indicates original rotation direction and -1 inverted. + signed_drive_mode = -(drive_mode * 2 - 1) + position *= signed_drive_mode + return position + + +def move_until_block(arm, motor_name, positive_direction=True, while_move_hook=None): + count = 0 + while True: + present_pos = arm.read("Present_Position", motor_name) + if positive_direction: + # Move +100 steps every time. Lower the steps to lower the speed at which the arm moves. 
+ arm.write("Goal_Position", present_pos + 100, motor_name) + else: + arm.write("Goal_Position", present_pos - 100, motor_name) + + if while_move_hook is not None: + while_move_hook() + + present_pos = arm.read("Present_Position", motor_name).item() + present_speed = arm.read("Present_Speed", motor_name).item() + present_current = arm.read("Present_Current", motor_name).item() + # present_load = arm.read("Present_Load", motor_name).item() + # present_voltage = arm.read("Present_Voltage", motor_name).item() + # present_temperature = arm.read("Present_Temperature", motor_name).item() + + # print(f"{present_pos=}") + # print(f"{present_speed=}") + # print(f"{present_current=}") + # print(f"{present_load=}") + # print(f"{present_voltage=}") + # print(f"{present_temperature=}") + + if present_speed == 0 and present_current > 40: + count += 1 + if count > 100 or present_current > 300: + return present_pos + else: + count = 0 + + +def move_to_calibrate( + arm, + motor_name, + invert_drive_mode=False, + positive_first=True, + in_between_move_hook=None, + while_move_hook=None, +): + initial_pos = arm.read("Present_Position", motor_name) + + if positive_first: + p_present_pos = move_until_block( + arm, motor_name, positive_direction=True, while_move_hook=while_move_hook + ) + else: + n_present_pos = move_until_block( + arm, motor_name, positive_direction=False, while_move_hook=while_move_hook + ) + + if in_between_move_hook is not None: + in_between_move_hook() + + if positive_first: + n_present_pos = move_until_block( + arm, motor_name, positive_direction=False, while_move_hook=while_move_hook + ) + else: + p_present_pos = move_until_block( + arm, motor_name, positive_direction=True, while_move_hook=while_move_hook + ) + + zero_pos = (n_present_pos + p_present_pos) / 2 + + calib_data = { + "initial_pos": initial_pos, + "homing_offset": zero_pos if invert_drive_mode else -zero_pos, + "invert_drive_mode": invert_drive_mode, + "drive_mode": -1 if invert_drive_mode else 0, + "zero_pos": zero_pos, + "start_pos": n_present_pos if invert_drive_mode else p_present_pos, + "end_pos": p_present_pos if invert_drive_mode else n_present_pos, + } + return calib_data + + +def apply_offset(calib, offset): + calib["zero_pos"] += offset + if calib["drive_mode"]: + calib["homing_offset"] += offset + else: + calib["homing_offset"] -= offset + return calib + + +def run_arm_auto_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str): + if robot_type == "so100": + return run_arm_auto_calibration_so100(arm, robot_type, arm_name, arm_type) + elif robot_type == "moss": + return run_arm_auto_calibration_moss(arm, robot_type, arm_name, arm_type) + else: + raise ValueError(robot_type) + + +def run_arm_auto_calibration_so100(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str): + """All the offsets and magic numbers are hand tuned, and are unique to SO-100 follower arms""" + if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any(): + raise ValueError("To run calibration, the torque must be disabled on all motors.") + + if not (robot_type == "so100" and arm_type == "follower"): + raise NotImplementedError("Auto calibration only supports the follower of so100 arms for now.") + + print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...") + + print("\nMove arm to initial position") + print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="initial")) + input("Press Enter to continue...") + + # Lower the acceleration of the motors (in [0,254]) + 
initial_acceleration = arm.read("Acceleration") + arm.write("Lock", 0) + arm.write("Acceleration", 10) + time.sleep(1) + + arm.write("Torque_Enable", TorqueMode.ENABLED.value) + + print(f'{arm.read("Present_Position", "elbow_flex")=}') + + calib = {} + + init_wf_pos = arm.read("Present_Position", "wrist_flex") + init_sl_pos = arm.read("Present_Position", "shoulder_lift") + init_ef_pos = arm.read("Present_Position", "elbow_flex") + arm.write("Goal_Position", init_wf_pos - 800, "wrist_flex") + arm.write("Goal_Position", init_sl_pos + 150 + 1024, "shoulder_lift") + arm.write("Goal_Position", init_ef_pos - 2048, "elbow_flex") + time.sleep(2) + + print("Calibrate shoulder_pan") + calib["shoulder_pan"] = move_to_calibrate(arm, "shoulder_pan") + arm.write("Goal_Position", calib["shoulder_pan"]["zero_pos"], "shoulder_pan") + time.sleep(1) + + print("Calibrate gripper") + calib["gripper"] = move_to_calibrate(arm, "gripper", invert_drive_mode=True) + time.sleep(1) + + print("Calibrate wrist_flex") + calib["wrist_flex"] = move_to_calibrate(arm, "wrist_flex") + calib["wrist_flex"] = apply_offset(calib["wrist_flex"], offset=80) + + def in_between_move_hook(): + nonlocal arm, calib + time.sleep(2) + ef_pos = arm.read("Present_Position", "elbow_flex") + sl_pos = arm.read("Present_Position", "shoulder_lift") + arm.write("Goal_Position", ef_pos + 1024, "elbow_flex") + arm.write("Goal_Position", sl_pos - 1024, "shoulder_lift") + time.sleep(2) + + print("Calibrate elbow_flex") + calib["elbow_flex"] = move_to_calibrate( + arm, "elbow_flex", positive_first=False, in_between_move_hook=in_between_move_hook + ) + calib["elbow_flex"] = apply_offset(calib["elbow_flex"], offset=80 - 1024) + + arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] + 1024 + 512, "elbow_flex") + time.sleep(1) + + def in_between_move_hook(): + nonlocal arm, calib + arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"], "elbow_flex") + + print("Calibrate shoulder_lift") + calib["shoulder_lift"] = move_to_calibrate( + arm, + "shoulder_lift", + invert_drive_mode=True, + positive_first=False, + in_between_move_hook=in_between_move_hook, + ) + # add an 30 steps as offset to align with body + calib["shoulder_lift"] = apply_offset(calib["shoulder_lift"], offset=1024 - 50) + + def while_move_hook(): + nonlocal arm, calib + positions = { + "shoulder_lift": round(calib["shoulder_lift"]["zero_pos"] - 1600), + "elbow_flex": round(calib["elbow_flex"]["zero_pos"] + 1700), + "wrist_flex": round(calib["wrist_flex"]["zero_pos"] + 800), + "gripper": round(calib["gripper"]["end_pos"]), + } + arm.write("Goal_Position", list(positions.values()), list(positions.keys())) + + arm.write("Goal_Position", round(calib["shoulder_lift"]["zero_pos"] - 1600), "shoulder_lift") + time.sleep(2) + arm.write("Goal_Position", round(calib["elbow_flex"]["zero_pos"] + 1700), "elbow_flex") + time.sleep(2) + arm.write("Goal_Position", round(calib["wrist_flex"]["zero_pos"] + 800), "wrist_flex") + time.sleep(2) + arm.write("Goal_Position", round(calib["gripper"]["end_pos"]), "gripper") + time.sleep(2) + + print("Calibrate wrist_roll") + calib["wrist_roll"] = move_to_calibrate( + arm, "wrist_roll", invert_drive_mode=True, positive_first=False, while_move_hook=while_move_hook + ) + + arm.write("Goal_Position", calib["wrist_roll"]["zero_pos"], "wrist_roll") + time.sleep(1) + arm.write("Goal_Position", calib["gripper"]["start_pos"], "gripper") + time.sleep(1) + arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"], "wrist_flex") + time.sleep(1) + 
arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] + 2048, "elbow_flex") + arm.write("Goal_Position", calib["shoulder_lift"]["zero_pos"] - 2048, "shoulder_lift") + time.sleep(1) + arm.write("Goal_Position", calib["shoulder_pan"]["zero_pos"], "shoulder_pan") + time.sleep(1) + + calib_modes = [] + for name in arm.motor_names: + if name == "gripper": + calib_modes.append(CalibrationMode.LINEAR.name) + else: + calib_modes.append(CalibrationMode.DEGREE.name) + + calib_dict = { + "homing_offset": [calib[name]["homing_offset"] for name in arm.motor_names], + "drive_mode": [calib[name]["drive_mode"] for name in arm.motor_names], + "start_pos": [calib[name]["start_pos"] for name in arm.motor_names], + "end_pos": [calib[name]["end_pos"] for name in arm.motor_names], + "calib_mode": calib_modes, + "motor_names": arm.motor_names, + } + + # Re-enable original accerlation + arm.write("Lock", 0) + arm.write("Acceleration", initial_acceleration) + time.sleep(1) + + return calib_dict + + +def run_arm_auto_calibration_moss(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str): + """All the offsets and magic numbers are hand tuned, and are unique to SO-100 follower arms""" + if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any(): + raise ValueError("To run calibration, the torque must be disabled on all motors.") + + if not (robot_type == "moss" and arm_type == "follower"): + raise NotImplementedError("Auto calibration only supports the follower of moss arms for now.") + + print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...") + + print("\nMove arm to initial position") + print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="initial")) + input("Press Enter to continue...") + + # Lower the acceleration of the motors (in [0,254]) + initial_acceleration = arm.read("Acceleration") + arm.write("Lock", 0) + arm.write("Acceleration", 10) + time.sleep(1) + + arm.write("Torque_Enable", TorqueMode.ENABLED.value) + + sl_pos = arm.read("Present_Position", "shoulder_lift") + arm.write("Goal_Position", sl_pos - 1024 - 450, "shoulder_lift") + ef_pos = arm.read("Present_Position", "elbow_flex") + arm.write("Goal_Position", ef_pos + 1024 + 450, "elbow_flex") + time.sleep(2) + + calib = {} + + print("Calibrate shoulder_pan") + calib["shoulder_pan"] = move_to_calibrate(arm, "shoulder_pan") + arm.write("Goal_Position", calib["shoulder_pan"]["zero_pos"], "shoulder_pan") + time.sleep(1) + + print("Calibrate gripper") + calib["gripper"] = move_to_calibrate(arm, "gripper", invert_drive_mode=True) + time.sleep(1) + + print("Calibrate wrist_flex") + calib["wrist_flex"] = move_to_calibrate(arm, "wrist_flex", invert_drive_mode=True) + calib["wrist_flex"] = apply_offset(calib["wrist_flex"], offset=-210 + 1024) + + wr_pos = arm.read("Present_Position", "wrist_roll") + arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex") + time.sleep(1) + arm.write("Goal_Position", wr_pos - 1024, "wrist_roll") + time.sleep(1) + arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 2048, "wrist_flex") + time.sleep(1) + arm.write("Goal_Position", calib["gripper"]["end_pos"], "gripper") + time.sleep(1) + + print("Calibrate wrist_roll") + calib["wrist_roll"] = move_to_calibrate(arm, "wrist_roll", invert_drive_mode=True) + calib["wrist_roll"] = apply_offset(calib["wrist_roll"], offset=790) + + arm.write("Goal_Position", calib["wrist_roll"]["zero_pos"] - 1024, "wrist_roll") + arm.write("Goal_Position", calib["gripper"]["start_pos"], "gripper") + 
arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex") + time.sleep(1) + arm.write("Goal_Position", calib["wrist_roll"]["zero_pos"], "wrist_roll") + arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 2048, "wrist_flex") + + def in_between_move_elbow_flex_hook(): + nonlocal arm, calib + arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"], "wrist_flex") + + print("Calibrate elbow_flex") + calib["elbow_flex"] = move_to_calibrate( + arm, + "elbow_flex", + invert_drive_mode=True, + in_between_move_hook=in_between_move_elbow_flex_hook, + ) + arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex") + + def in_between_move_shoulder_lift_hook(): + nonlocal arm, calib + sl = arm.read("Present_Position", "shoulder_lift") + arm.write("Goal_Position", sl - 1500, "shoulder_lift") + time.sleep(1) + arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] + 1536, "elbow_flex") + time.sleep(1) + arm.write("Goal_Position", calib["wrist_flex"]["start_pos"], "wrist_flex") + time.sleep(1) + + print("Calibrate shoulder_lift") + calib["shoulder_lift"] = move_to_calibrate( + arm, "shoulder_lift", in_between_move_hook=in_between_move_shoulder_lift_hook + ) + calib["shoulder_lift"] = apply_offset(calib["shoulder_lift"], offset=-1024) + + arm.write("Goal_Position", calib["wrist_flex"]["zero_pos"] - 1024, "wrist_flex") + time.sleep(1) + arm.write("Goal_Position", calib["shoulder_lift"]["zero_pos"] + 2048, "shoulder_lift") + arm.write("Goal_Position", calib["elbow_flex"]["zero_pos"] - 1024 - 400, "elbow_flex") + time.sleep(2) + + calib_modes = [] + for name in arm.motor_names: + if name == "gripper": + calib_modes.append(CalibrationMode.LINEAR.name) + else: + calib_modes.append(CalibrationMode.DEGREE.name) + + calib_dict = { + "homing_offset": [calib[name]["homing_offset"] for name in arm.motor_names], + "drive_mode": [calib[name]["drive_mode"] for name in arm.motor_names], + "start_pos": [calib[name]["start_pos"] for name in arm.motor_names], + "end_pos": [calib[name]["end_pos"] for name in arm.motor_names], + "calib_mode": calib_modes, + "motor_names": arm.motor_names, + } + + # Re-enable original accerlation + arm.write("Lock", 0) + arm.write("Acceleration", initial_acceleration) + time.sleep(1) + + return calib_dict + + +def run_arm_manual_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str): + """This function ensures that a neural network trained on data collected on a given robot + can work on another robot. For instance before calibration, setting a same goal position + for each motor of two different robots will get two very different positions. But after calibration, + the two robots will move to the same position.To this end, this function computes the homing offset + and the drive mode for each motor of a given robot. + + Homing offset is used to shift the motor position to a ]-2048, +2048[ nominal range (when the motor uses 2048 steps + to complete a half a turn). This range is set around an arbitrary "zero position" corresponding to all motor positions + being 0. During the calibration process, you will need to manually move the robot to this "zero position". + + Drive mode is used to invert the rotation direction of the motor. This is useful when some motors have been assembled + in the opposite orientation for some robots. During the calibration process, you will need to manually move the robot + to the "rotated position". + + After calibration, the homing offsets and drive modes are stored in a cache. 
+
+    Example of usage:
+    ```python
+    run_arm_manual_calibration(arm, "so100", "left", "follower")
+    ```
+    """
+    if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any():
+        raise ValueError("To run calibration, the torque must be disabled on all motors.")
+
+    print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...")
+
+    print("\nMove arm to zero position")
+    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="zero"))
+    input("Press Enter to continue...")
+
+    # We arbitrarily chose our zero target position to be a straight horizontal position with the gripper upwards and closed.
+    # It is easy to identify and all motors are in a "quarter turn" position. Once calibration is done, this position will
+    # correspond to every motor angle being 0. If you set all 0 as Goal Position, the arm will move to this position.
+    zero_target_pos = convert_degrees_to_steps(ZERO_POSITION_DEGREE, arm.motor_models)
+
+    # Compute homing offset so that `present_position + homing_offset ~= target_position`.
+    zero_pos = arm.read("Present_Position")
+    homing_offset = zero_target_pos - zero_pos
+
+    # The rotated target position corresponds to a rotation of a quarter turn from the zero position.
+    # This allows us to identify the rotation direction of each motor.
+    # For instance, if a motor rotates by 90 degrees and its value is -90 after applying the homing offset, then we know its rotation direction
+    # is inverted. However, for the calibration to be successful, every motor needs to follow the same target position.
+    # Sometimes, there is only one possible rotation direction. For instance, if the gripper is closed, there is only one direction which
+    # corresponds to opening the gripper. When the rotation direction is ambiguous, we arbitrarily rotate clockwise from the point of view
+    # of the previous motor in the kinematic chain.
+    print("\nMove arm to rotated target position")
+    print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rotated"))
+    input("Press Enter to continue...")
+
+    rotated_target_pos = convert_degrees_to_steps(ROTATED_POSITION_DEGREE, arm.motor_models)
+
+    # Find drive mode by rotating each motor by a quarter of a turn.
+    # Drive mode indicates if the motor rotation direction should be inverted (=1) or not (=0).
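The diff resumes below with the drive-mode computation, `drive_mode = (rotated_pos < zero_pos)`. As a small numeric illustration of that idea and of how the homing offset is then recomputed, here is a sketch with made-up step values for a hypothetical 3-motor arm (resolution 4096, so a quarter turn is 1024 steps):

```python
import numpy as np

# Made-up raw positions (in motor steps) for a hypothetical 3-motor arm.
zero_pos = np.array([2048, 1024, 2500])     # read at the "zero position"
rotated_pos = np.array([3072, 0, 3524])     # read after rotating each joint by a quarter turn

# A motor whose value decreased during the positive quarter turn is assembled inverted.
drive_mode = (rotated_pos < zero_pos).astype(np.int32)
print(drive_mode)  # [0 1 0] -> the second motor rotates in the opposite direction

# The homing offset is then recomputed against the rotated target (90 degrees = 1024 steps here),
# after flipping the sign of inverted motors, mirroring apply_drive_mode above.
signed = -(drive_mode * 2 - 1)              # 0 -> +1, 1 -> -1
homing_offset = 1024 - rotated_pos * signed
print(homing_offset)  # [-2048  1024 -2500]
```

The Dynamixel variant earlier in this diff additionally snaps positions to the nearest quarter turn with `compute_nearest_rounded_position`; the Feetech manual calibration here uses the raw readings directly.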
+ rotated_pos = arm.read("Present_Position") + drive_mode = (rotated_pos < zero_pos).astype(np.int32) + + # Re-compute homing offset to take into account drive mode + rotated_drived_pos = apply_drive_mode(rotated_pos, drive_mode) + homing_offset = rotated_target_pos - rotated_drived_pos + + print("\nMove arm to rest position") + print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rest")) + input("Press Enter to continue...") + print() + + # Joints with rotational motions are expressed in degrees in nominal range of [-180, 180] + calib_modes = [] + for name in arm.motor_names: + if name == "gripper": + calib_modes.append(CalibrationMode.LINEAR.name) + else: + calib_modes.append(CalibrationMode.DEGREE.name) + + calib_dict = { + "homing_offset": homing_offset.tolist(), + "drive_mode": drive_mode.tolist(), + "start_pos": zero_pos.tolist(), + "end_pos": rotated_pos.tolist(), + "calib_mode": calib_modes, + "motor_names": arm.motor_names, + } + return calib_dict diff --git a/lerobot/common/robot_devices/robots/manipulator.py b/lerobot/common/robot_devices/robots/manipulator.py index 337519765..618105064 100644 --- a/lerobot/common/robot_devices/robots/manipulator.py +++ b/lerobot/common/robot_devices/robots/manipulator.py @@ -1,3 +1,9 @@ +"""Contains logic to instantiate a robot, read information from its motors and cameras, +and send orders to its motors. +""" +# TODO(rcadene, aliberts): reorganize the codebase into one file per robot, with the associated +# calibration procedure, to make it easy for people to add their own robot. + import json import logging import time @@ -10,138 +16,10 @@ import torch from lerobot.common.robot_devices.cameras.utils import Camera -from lerobot.common.robot_devices.motors.dynamixel import ( - CalibrationMode, - TorqueMode, - convert_degrees_to_steps, -) from lerobot.common.robot_devices.motors.utils import MotorsBus from lerobot.common.robot_devices.robots.utils import get_arm_id from lerobot.common.robot_devices.utils import RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError -######################################################################## -# Calibration logic -######################################################################## - -URL_TEMPLATE = ( - "https://raw.githubusercontent.com/huggingface/lerobot/main/media/{robot}/{arm}_{position}.webp" -) - -# The following positions are provided in nominal degree range ]-180, +180[ -# For more info on these constants, see comments in the code where they get used. -ZERO_POSITION_DEGREE = 0 -ROTATED_POSITION_DEGREE = 90 - - -def assert_drive_mode(drive_mode): - # `drive_mode` is in [0,1] with 0 means original rotation direction for the motor, and 1 means inverted. - if not np.all(np.isin(drive_mode, [0, 1])): - raise ValueError(f"`drive_mode` contains values other than 0 or 1: ({drive_mode})") - - -def apply_drive_mode(position, drive_mode): - assert_drive_mode(drive_mode) - # Convert `drive_mode` from [0, 1] with 0 indicates original rotation direction and 1 inverted, - # to [-1, 1] with 1 indicates original rotation direction and -1 inverted. 
- signed_drive_mode = -(drive_mode * 2 - 1) - position *= signed_drive_mode - return position - - -def compute_nearest_rounded_position(position, models): - delta_turn = convert_degrees_to_steps(ROTATED_POSITION_DEGREE, models) - nearest_pos = np.round(position.astype(float) / delta_turn) * delta_turn - return nearest_pos.astype(position.dtype) - - -def run_arm_calibration(arm: MotorsBus, robot_type: str, arm_name: str, arm_type: str): - """This function ensures that a neural network trained on data collected on a given robot - can work on another robot. For instance before calibration, setting a same goal position - for each motor of two different robots will get two very different positions. But after calibration, - the two robots will move to the same position.To this end, this function computes the homing offset - and the drive mode for each motor of a given robot. - - Homing offset is used to shift the motor position to a ]-2048, +2048[ nominal range (when the motor uses 2048 steps - to complete a half a turn). This range is set around an arbitrary "zero position" corresponding to all motor positions - being 0. During the calibration process, you will need to manually move the robot to this "zero position". - - Drive mode is used to invert the rotation direction of the motor. This is useful when some motors have been assembled - in the opposite orientation for some robots. During the calibration process, you will need to manually move the robot - to the "rotated position". - - After calibration, the homing offsets and drive modes are stored in a cache. - - Example of usage: - ```python - run_arm_calibration(arm, "koch", "left", "follower") - ``` - """ - if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any(): - raise ValueError("To run calibration, the torque must be disabled on all motors.") - - print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...") - - print("\nMove arm to zero position") - print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="zero")) - input("Press Enter to continue...") - - # We arbitrarily chose our zero target position to be a straight horizontal position with gripper upwards and closed. - # It is easy to identify and all motors are in a "quarter turn" position. Once calibration is done, this position will - # correspond to every motor angle being 0. If you set all 0 as Goal Position, the arm will move in this position. - zero_target_pos = convert_degrees_to_steps(ZERO_POSITION_DEGREE, arm.motor_models) - - # Compute homing offset so that `present_position + homing_offset ~= target_position`. - zero_pos = arm.read("Present_Position") - zero_nearest_pos = compute_nearest_rounded_position(zero_pos, arm.motor_models) - homing_offset = zero_target_pos - zero_nearest_pos - - # The rotated target position corresponds to a rotation of a quarter turn from the zero position. - # This allows to identify the rotation direction of each motor. - # For instance, if the motor rotates 90 degree, and its value is -90 after applying the homing offset, then we know its rotation direction - # is inverted. However, for the calibration being successful, we need everyone to follow the same target position. - # Sometimes, there is only one possible rotation direction. For instance, if the gripper is closed, there is only one direction which - # corresponds to opening the gripper. When the rotation direction is ambiguous, we arbitrarely rotate clockwise from the point of view - # of the previous motor in the kinetic chain. 
- print("\nMove arm to rotated target position") - print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rotated")) - input("Press Enter to continue...") - - rotated_target_pos = convert_degrees_to_steps(ROTATED_POSITION_DEGREE, arm.motor_models) - - # Find drive mode by rotating each motor by a quarter of a turn. - # Drive mode indicates if the motor rotation direction should be inverted (=1) or not (=0). - rotated_pos = arm.read("Present_Position") - drive_mode = (rotated_pos < zero_pos).astype(np.int32) - - # Re-compute homing offset to take into account drive mode - rotated_drived_pos = apply_drive_mode(rotated_pos, drive_mode) - rotated_nearest_pos = compute_nearest_rounded_position(rotated_drived_pos, arm.motor_models) - homing_offset = rotated_target_pos - rotated_nearest_pos - - print("\nMove arm to rest position") - print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="rest")) - input("Press Enter to continue...") - print() - - # Joints with rotational motions are expressed in degrees in nominal range of [-180, 180] - calib_mode = [CalibrationMode.DEGREE.name] * len(arm.motor_names) - - # TODO(rcadene): make type of joints (DEGREE or LINEAR) configurable from yaml? - if robot_type == "aloha" and "gripper" in arm.motor_names: - # Joints with linear motions (like gripper of Aloha) are experessed in nominal range of [0, 100] - calib_idx = arm.motor_names.index("gripper") - calib_mode[calib_idx] = CalibrationMode.LINEAR.name - - calib_data = { - "homing_offset": homing_offset.tolist(), - "drive_mode": drive_mode.tolist(), - "start_pos": zero_pos.tolist(), - "end_pos": rotated_pos.tolist(), - "calib_mode": calib_mode, - "motor_names": arm.motor_names, - } - return calib_data - def ensure_safe_goal_position( goal_pos: torch.Tensor, present_pos: torch.Tensor, max_relative_target: float | list[float] @@ -163,11 +41,6 @@ def ensure_safe_goal_position( return safe_goal_pos -######################################################################## -# Manipulator robot -######################################################################## - - @dataclass class ManipulatorRobotConfig: """ @@ -178,7 +51,7 @@ class ManipulatorRobotConfig: """ # Define all components of the robot - robot_type: str | None = None + robot_type: str = "koch" leader_arms: dict[str, MotorsBus] = field(default_factory=lambda: {}) follower_arms: dict[str, MotorsBus] = field(default_factory=lambda: {}) cameras: dict[str, Camera] = field(default_factory=lambda: {}) @@ -207,6 +80,10 @@ def __setattr__(self, prop: str, val): ) super().__setattr__(prop, val) + def __post_init__(self): + if self.robot_type not in ["koch", "koch_bimanual", "aloha", "so100", "moss"]: + raise ValueError(f"Provided robot type ({self.robot_type}) is not supported.") + class ManipulatorRobot: # TODO(rcadene): Implement force feedback @@ -349,6 +226,61 @@ def __init__( self.is_connected = False self.logs = {} + def get_motor_names(self, arm: dict[str, MotorsBus]) -> list: + return [f"{arm}_{motor}" for arm, bus in arm.items() for motor in bus.motors] + + @property + def camera_features(self) -> dict: + cam_ft = {} + for cam_key, cam in self.cameras.items(): + key = f"observation.images.{cam_key}" + cam_ft[key] = { + "shape": (cam.height, cam.width, cam.channels), + "names": ["height", "width", "channels"], + "info": None, + } + return cam_ft + + @property + def motor_features(self) -> dict: + action_names = self.get_motor_names(self.leader_arms) + state_names = self.get_motor_names(self.leader_arms) + 
return { + "action": { + "dtype": "float32", + "shape": (len(action_names),), + "names": action_names, + }, + "observation.state": { + "dtype": "float32", + "shape": (len(state_names),), + "names": state_names, + }, + } + + @property + def features(self): + return {**self.motor_features, **self.camera_features} + + @property + def has_camera(self): + return len(self.cameras) > 0 + + @property + def num_cameras(self): + return len(self.cameras) + + @property + def available_arms(self): + available_arms = [] + for name in self.follower_arms: + arm_id = get_arm_id(name, "follower") + available_arms.append(arm_id) + for name in self.leader_arms: + arm_id = get_arm_id(name, "leader") + available_arms.append(arm_id) + return available_arms + def connect(self): if self.is_connected: raise RobotDeviceAlreadyConnectedError( @@ -364,9 +296,15 @@ def connect(self): for name in self.follower_arms: print(f"Connecting {name} follower arm.") self.follower_arms[name].connect() + for name in self.leader_arms: print(f"Connecting {name} leader arm.") self.leader_arms[name].connect() + if self.robot_type in ["koch", "koch_bimanual", "aloha"]: + from lerobot.common.robot_devices.motors.dynamixel import TorqueMode + elif self.robot_type in ["so100", "moss"]: + from lerobot.common.robot_devices.motors.feetech import TorqueMode + # We assume that at connection time, arms are in a rest position, and torque can # be safely disabled to run calibration and/or set robot preset configurations. for name in self.follower_arms: @@ -377,12 +315,12 @@ def connect(self): self.activate_calibration() # Set robot preset (e.g. torque in leader gripper for Koch v1.1) - if self.robot_type == "koch": + if self.robot_type in ["koch", "koch_bimanual"]: self.set_koch_robot_preset() elif self.robot_type == "aloha": self.set_aloha_robot_preset() - else: - warnings.warn(f"No preset found for robot type: {self.robot_type}", stacklevel=1) + elif self.robot_type in ["so100", "moss"]: + self.set_so100_robot_preset() # Enable torque on all motors of the follower arms for name in self.follower_arms: @@ -390,12 +328,22 @@ def connect(self): self.follower_arms[name].write("Torque_Enable", 1) if self.config.gripper_open_degree is not None: + if self.robot_type not in ["koch", "koch_bimanual"]: + raise NotImplementedError( + f"{self.robot_type} does not support position AND current control in the handle, which is require to set the gripper open." + ) # Set the leader arm in torque mode with the gripper motor set to an angle. This makes it possible # to squeeze the gripper and have it spring back to an open position on its own. 
for name in self.leader_arms: self.leader_arms[name].write("Torque_Enable", 1, "gripper") self.leader_arms[name].write("Goal_Position", self.config.gripper_open_degree, "gripper") + # Check both arms can be read + for name in self.follower_arms: + self.follower_arms[name].read("Present_Position") + for name in self.leader_arms: + self.leader_arms[name].read("Present_Position") + # Connect the cameras for name in self.cameras: self.cameras[name].connect() @@ -416,8 +364,20 @@ def load_or_run_calibration_(name, arm, arm_type): with open(arm_calib_path) as f: calibration = json.load(f) else: + # TODO(rcadene): display a warning in __init__ if calibration file not available print(f"Missing calibration file '{arm_calib_path}'") - calibration = run_arm_calibration(arm, self.robot_type, name, arm_type) + + if self.robot_type in ["koch", "koch_bimanual", "aloha"]: + from lerobot.common.robot_devices.robots.dynamixel_calibration import run_arm_calibration + + calibration = run_arm_calibration(arm, self.robot_type, name, arm_type) + + elif self.robot_type in ["so100", "moss"]: + from lerobot.common.robot_devices.robots.feetech_calibration import ( + run_arm_manual_calibration, + ) + + calibration = run_arm_manual_calibration(arm, self.robot_type, name, arm_type) print(f"Calibration is done! Saving calibration file '{arm_calib_path}'") arm_calib_path.parent.mkdir(parents=True, exist_ok=True) @@ -435,6 +395,8 @@ def load_or_run_calibration_(name, arm, arm_type): def set_koch_robot_preset(self): def set_operating_mode_(arm): + from lerobot.common.robot_devices.motors.dynamixel import TorqueMode + if (arm.read("Torque_Enable") != TorqueMode.DISABLED.value).any(): raise ValueError("To run set robot preset, the torque must be disabled on all motors.") @@ -522,6 +484,23 @@ def set_shadow_(arm): stacklevel=1, ) + def set_so100_robot_preset(self): + for name in self.follower_arms: + # Mode=0 for Position Control + self.follower_arms[name].write("Mode", 0) + # Set P_Coefficient to lower value to avoid shakiness (Default is 32) + self.follower_arms[name].write("P_Coefficient", 16) + # Set I_Coefficient and D_Coefficient to default value 0 and 32 + self.follower_arms[name].write("I_Coefficient", 0) + self.follower_arms[name].write("D_Coefficient", 32) + # Close the write lock so that Maximum_Acceleration gets written to EPROM address, + # which is mandatory for Maximum_Acceleration to take effect after rebooting. + self.follower_arms[name].write("Lock", 0) + # Set Maximum_Acceleration to 254 to speedup acceleration and deceleration of + # the motors. Note: this configuration is not in the official STS3215 Memory Table + self.follower_arms[name].write("Maximum_Acceleration", 254) + self.follower_arms[name].write("Acceleration", 254) + def teleop_step( self, record_data=False ) -> None | tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]: @@ -681,6 +660,10 @@ def send_action(self, action: torch.Tensor) -> torch.Tensor: return torch.cat(action_sent) + def print_logs(self): + pass + # TODO(aliberts): move robot-specific logs logic here + def disconnect(self): if not self.is_connected: raise RobotDeviceNotConnectedError( diff --git a/lerobot/common/robot_devices/robots/stretch.py b/lerobot/common/robot_devices/robots/stretch.py new file mode 100644 index 000000000..ff86b6d80 --- /dev/null +++ b/lerobot/common/robot_devices/robots/stretch.py @@ -0,0 +1,216 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +from dataclasses import dataclass, field, replace + +import torch +from stretch_body.gamepad_teleop import GamePadTeleop +from stretch_body.robot import Robot as StretchAPI +from stretch_body.robot_params import RobotParams + +from lerobot.common.robot_devices.cameras.utils import Camera + + +@dataclass +class StretchRobotConfig: + robot_type: str | None = "stretch" + cameras: dict[str, Camera] = field(default_factory=lambda: {}) + # TODO(aliberts): add feature with max_relative target + # TODO(aliberts): add comment on max_relative target + max_relative_target: list[float] | float | None = None + + +class StretchRobot(StretchAPI): + """Wrapper of stretch_body.robot.Robot""" + + def __init__(self, config: StretchRobotConfig | None = None, **kwargs): + super().__init__() + if config is None: + config = StretchRobotConfig() + # Overwrite config arguments using kwargs + self.config = replace(config, **kwargs) + + self.robot_type = self.config.robot_type + self.cameras = self.config.cameras + self.is_connected = False + self.teleop = None + self.logs = {} + + # TODO(aliberts): test this + RobotParams.set_logging_level("WARNING") + RobotParams.set_logging_formatter("brief_console_formatter") + + self.state_keys = None + self.action_keys = None + + def connect(self) -> None: + self.is_connected = self.startup() + if not self.is_connected: + print("Another process is already using Stretch. 
Try running 'stretch_free_robot_process.py'") + raise ConnectionError() + + for name in self.cameras: + self.cameras[name].connect() + self.is_connected = self.is_connected and self.cameras[name].is_connected + + if not self.is_connected: + print("Could not connect to the cameras, check that all cameras are plugged-in.") + raise ConnectionError() + + self.run_calibration() + + def run_calibration(self) -> None: + if not self.is_homed(): + self.home() + + def teleop_step( + self, record_data=False + ) -> None | tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]: + # TODO(aliberts): return ndarrays instead of torch.Tensors + if not self.is_connected: + raise ConnectionError() + + if self.teleop is None: + self.teleop = GamePadTeleop(robot_instance=False) + self.teleop.startup(robot=self) + + before_read_t = time.perf_counter() + state = self.get_state() + action = self.teleop.gamepad_controller.get_state() + self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t + + before_write_t = time.perf_counter() + self.teleop.do_motion(robot=self) + self.push_command() + self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t + + if self.state_keys is None: + self.state_keys = list(state) + + if not record_data: + return + + state = torch.as_tensor(list(state.values())) + action = torch.as_tensor(list(action.values())) + + # Capture images from cameras + images = {} + for name in self.cameras: + before_camread_t = time.perf_counter() + images[name] = self.cameras[name].async_read() + images[name] = torch.from_numpy(images[name]) + self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"] + self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t + + # Populate output dictionnaries + obs_dict, action_dict = {}, {} + obs_dict["observation.state"] = state + action_dict["action"] = action + for name in self.cameras: + obs_dict[f"observation.images.{name}"] = images[name] + + return obs_dict, action_dict + + def get_state(self) -> dict: + status = self.get_status() + return { + "head_pan.pos": status["head"]["head_pan"]["pos"], + "head_tilt.pos": status["head"]["head_tilt"]["pos"], + "lift.pos": status["lift"]["pos"], + "arm.pos": status["arm"]["pos"], + "wrist_pitch.pos": status["end_of_arm"]["wrist_pitch"]["pos"], + "wrist_roll.pos": status["end_of_arm"]["wrist_roll"]["pos"], + "wrist_yaw.pos": status["end_of_arm"]["wrist_yaw"]["pos"], + "gripper.pos": status["end_of_arm"]["stretch_gripper"]["pos"], + "base_x.vel": status["base"]["x_vel"], + "base_y.vel": status["base"]["y_vel"], + "base_theta.vel": status["base"]["theta_vel"], + } + + def capture_observation(self) -> dict: + # TODO(aliberts): return ndarrays instead of torch.Tensors + before_read_t = time.perf_counter() + state = self.get_state() + self.logs["read_pos_dt_s"] = time.perf_counter() - before_read_t + + if self.state_keys is None: + self.state_keys = list(state) + + state = torch.as_tensor(list(state.values())) + + # Capture images from cameras + images = {} + for name in self.cameras: + before_camread_t = time.perf_counter() + images[name] = self.cameras[name].async_read() + images[name] = torch.from_numpy(images[name]) + self.logs[f"read_camera_{name}_dt_s"] = self.cameras[name].logs["delta_timestamp_s"] + self.logs[f"async_read_camera_{name}_dt_s"] = time.perf_counter() - before_camread_t + + # Populate output dictionnaries + obs_dict = {} + obs_dict["observation.state"] = state + for name in self.cameras: + obs_dict[f"observation.images.{name}"] = 
images[name] + + return obs_dict + + def send_action(self, action: torch.Tensor) -> torch.Tensor: + # TODO(aliberts): return ndarrays instead of torch.Tensors + if not self.is_connected: + raise ConnectionError() + + if self.teleop is None: + self.teleop = GamePadTeleop(robot_instance=False) + self.teleop.startup(robot=self) + + if self.action_keys is None: + dummy_action = self.teleop.gamepad_controller.get_state() + self.action_keys = list(dummy_action.keys()) + + action_dict = dict(zip(self.action_keys, action.tolist(), strict=True)) + + before_write_t = time.perf_counter() + self.teleop.do_motion(state=action_dict, robot=self) + self.push_command() + self.logs["write_pos_dt_s"] = time.perf_counter() - before_write_t + + # TODO(aliberts): return action_sent when motion is limited + return action + + def print_logs(self) -> None: + pass + # TODO(aliberts): move robot-specific logs logic here + + def teleop_safety_stop(self) -> None: + if self.teleop is not None: + self.teleop._safety_stop(robot=self) + + def disconnect(self) -> None: + self.stop() + if self.teleop is not None: + self.teleop.gamepad_controller.stop() + self.teleop.stop() + + if len(self.cameras) > 0: + for cam in self.cameras.values(): + cam.disconnect() + + self.is_connected = False + + def __del__(self): + self.disconnect() diff --git a/lerobot/common/robot_devices/robots/utils.py b/lerobot/common/robot_devices/robots/utils.py index 122155f78..a40db1312 100644 --- a/lerobot/common/robot_devices/robots/utils.py +++ b/lerobot/common/robot_devices/robots/utils.py @@ -9,8 +9,13 @@ def get_arm_id(name, arm_type): class Robot(Protocol): - def init_teleop(self): ... + # TODO(rcadene, aliberts): Add unit test checking the protocol is implemented in the corresponding classes + robot_type: str + features: dict + + def connect(self): ... def run_calibration(self): ... def teleop_step(self, record_data=False): ... def capture_observation(self): ... def send_action(self, action): ... + def disconnect(self): ... diff --git a/lerobot/common/robot_devices/utils.py b/lerobot/common/robot_devices/utils.py index bcbeb8e02..19bb637e5 100644 --- a/lerobot/common/robot_devices/utils.py +++ b/lerobot/common/robot_devices/utils.py @@ -16,6 +16,20 @@ def busy_wait(seconds): time.sleep(seconds) +def safe_disconnect(func): + # TODO(aliberts): Allow to pass custom exceptions + # (e.g. 
ThreadServiceExit, KeyboardInterrupt, SystemExit, UnpluggedError, DynamixelCommError) + def wrapper(robot, *args, **kwargs): + try: + return func(robot, *args, **kwargs) + except Exception as e: + if robot.is_connected: + robot.disconnect() + raise e + + return wrapper + + class RobotDeviceNotConnectedError(Exception): """Exception raised when the robot device is not connected.""" diff --git a/lerobot/common/utils/utils.py b/lerobot/common/utils/utils.py index 1aa0bc2d4..8e6b160af 100644 --- a/lerobot/common/utils/utils.py +++ b/lerobot/common/utils/utils.py @@ -16,16 +16,23 @@ import logging import os import os.path as osp +import platform import random from contextlib import contextmanager from datetime import datetime, timezone from pathlib import Path -from typing import Any, Generator +from typing import Any, Callable, Generator import hydra import numpy as np import torch -from omegaconf import DictConfig +from omegaconf import DictConfig, OmegaConf + + +def none_or_int(value): + if value == "None": + return None + return int(value) def inside_slurm(): @@ -34,12 +41,12 @@ def inside_slurm(): return "SLURM_JOB_ID" in os.environ -def get_safe_torch_device(cfg_device: str, log: bool = False) -> torch.device: +def get_safe_torch_device(cfg_device: str, log: bool = False, accelerator: Callable = None) -> torch.device: """Given a string, return a torch.device with checks on whether the device is available.""" match cfg_device: case "cuda": assert torch.cuda.is_available() - device = torch.device("cuda") + device = accelerator.device if accelerator else torch.device("cuda") case "mps": assert torch.backends.mps.is_available() device = torch.device("mps") @@ -80,13 +87,17 @@ def set_global_random_state(random_state_dict: dict[str, Any]): torch.cuda.random.set_rng_state(random_state_dict["torch_cuda_random_state"]) -def set_global_seed(seed): +def set_global_seed(seed, accelerator: Callable = None): """Set seed for reproducibility.""" random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) + if accelerator: + from accelerate.utils import set_seed + + set_seed(seed) @contextmanager @@ -108,7 +119,7 @@ def seeded_context(seed: int) -> Generator[None, None, None]: set_global_random_state(random_state_dict) -def init_logging(): +def init_logging(accelerator: Callable = None): def custom_format(record): dt = datetime.now().strftime("%Y-%m-%d %H:%M:%S") fnameline = f"{record.pathname}:{record.lineno}" @@ -126,6 +137,11 @@ def custom_format(record): console_handler.setFormatter(formatter) logging.getLogger().addHandler(console_handler) + if accelerator is not None and not accelerator.is_main_process: + # Disable duplicate logging on non-main processes + logging.info(f"Setting logging level on non-main process {accelerator.process_index} to WARNING.") + logging.getLogger().setLevel(logging.WARNING) + def format_big_number(num, precision=0): suffixes = ["", "K", "M", "B", "T", "Q"] @@ -183,3 +199,54 @@ def print_cuda_memory_usage(): def capture_timestamp_utc(): return datetime.now(timezone.utc) + + +def say(text, blocking=False): + # Check if mac, linux, or windows. 
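Editor's note: the `safe_disconnect` decorator added to `lerobot/common/robot_devices/utils.py` earlier in this hunk simply disconnects the robot before re-raising any exception from the wrapped control mode. A minimal usage sketch, with a hypothetical `DummyRobot` stand-in that is not a class from this patch:

```python
# Hedged usage sketch for the `safe_disconnect` decorator added above.
# `DummyRobot` is a stand-in for illustration, not a class from this patch.
from lerobot.common.robot_devices.utils import safe_disconnect


class DummyRobot:
    def __init__(self):
        self.is_connected = True

    def disconnect(self):
        self.is_connected = False


@safe_disconnect
def flaky_control_mode(robot):
    raise RuntimeError("simulated device error")


robot = DummyRobot()
try:
    flaky_control_mode(robot)
except RuntimeError:
    pass
assert robot.is_connected is False  # the decorator disconnected the robot before re-raising
```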
+ if platform.system() == "Darwin": + cmd = f'say "{text}"' + if not blocking: + cmd += " &" + elif platform.system() == "Linux": + cmd = f'spd-say "{text}"' + if blocking: + cmd += " --wait" + elif platform.system() == "Windows": + # TODO(rcadene): Make blocking option work for Windows + cmd = ( + 'PowerShell -Command "Add-Type -AssemblyName System.Speech; ' + f"(New-Object System.Speech.Synthesis.SpeechSynthesizer).Speak('{text}')\"" + ) + + os.system(cmd) + + +def log_say(text, play_sounds, blocking=False): + logging.info(text) + + if play_sounds: + say(text, blocking) + + +def is_launched_with_accelerate() -> bool: + return "ACCELERATE_MIXED_PRECISION" in os.environ + + +def get_accelerate_config(accelerator: Callable = None) -> dict[str, Any]: + config = {} + if not accelerator: + return config + config["num_processes"] = accelerator.num_processes + config["device"] = str(accelerator.device) + config["distributed_type"] = str(accelerator.distributed_type) + config["mixed_precision"] = accelerator.mixed_precision + config["gradient_accumulation_steps"] = accelerator.gradient_accumulation_steps + + return config + + +def update_omegaconf(cfg: DictConfig, config_name: str, config: dict[str, Any]) -> DictConfig: + cfg_dict = OmegaConf.to_container(cfg, resolve=True) + cfg_dict[config_name] = config + cfg = OmegaConf.create(cfg_dict) + return cfg diff --git a/lerobot/configs/default.yaml b/lerobot/configs/default.yaml index a3ff1d41b..ba42a10c1 100644 --- a/lerobot/configs/default.yaml +++ b/lerobot/configs/default.yaml @@ -37,7 +37,7 @@ training: num_workers: 4 batch_size: ??? - + lr_scheduler: eval_freq: ??? log_freq: 200 save_checkpoint: true @@ -121,6 +121,8 @@ eval: batch_size: 1 # `use_async_envs` specifies whether to use asynchronous environments (multiprocessing). use_async_envs: false + # maximum number of episodes to render into videos. + max_episodes_rendered: 10 wandb: enable: false diff --git a/lerobot/configs/env/aloha_real.yaml b/lerobot/configs/env/aloha_real.yaml new file mode 100644 index 000000000..57af4be20 --- /dev/null +++ b/lerobot/configs/env/aloha_real.yaml @@ -0,0 +1,10 @@ +# @package _global_ + +fps: 30 + +env: + name: real_world + task: null + state_dim: 18 + action_dim: 18 + fps: ${fps} diff --git a/lerobot/configs/env/moss_real.yaml b/lerobot/configs/env/moss_real.yaml new file mode 100644 index 000000000..8e65d72f4 --- /dev/null +++ b/lerobot/configs/env/moss_real.yaml @@ -0,0 +1,10 @@ +# @package _global_ + +fps: 30 + +env: + name: real_world + task: null + state_dim: 6 + action_dim: 6 + fps: ${fps} diff --git a/lerobot/configs/env/so100_real.yaml b/lerobot/configs/env/so100_real.yaml new file mode 100644 index 000000000..8e65d72f4 --- /dev/null +++ b/lerobot/configs/env/so100_real.yaml @@ -0,0 +1,10 @@ +# @package _global_ + +fps: 30 + +env: + name: real_world + task: null + state_dim: 6 + action_dim: 6 + fps: ${fps} diff --git a/lerobot/configs/policy/act_real.yaml b/lerobot/configs/policy/act_aloha_real.yaml similarity index 76% rename from lerobot/configs/policy/act_real.yaml rename to lerobot/configs/policy/act_aloha_real.yaml index 058104f4d..7c8094da1 100644 --- a/lerobot/configs/policy/act_real.yaml +++ b/lerobot/configs/policy/act_aloha_real.yaml @@ -1,16 +1,22 @@ # @package _global_ -# Use `act_real.yaml` to train on real-world Aloha/Aloha2 datasets. -# Compared to `act.yaml`, it contains 4 cameras (i.e. cam_right_wrist, cam_left_wrist, images, -# cam_low) instead of 1 camera (i.e. top). Also, `training.eval_freq` is set to -1. 
This config is used -# to evaluate checkpoints at a certain frequency of training steps. When it is set to -1, it deactivates evaluation. -# This is because real-world evaluation is done through [dora-lerobot](https://github.com/dora-rs/dora-lerobot). -# Look at its README for more information on how to evaluate a checkpoint in the real-world. +# Use `act_aloha_real.yaml` to train on real-world datasets collected on Aloha or Aloha-2 robots. +# Compared to `act.yaml`, it contains 4 cameras (i.e. cam_right_wrist, cam_left_wrist, cam_high, cam_low) instead of 1 camera (i.e. top). +# Also, `training.eval_freq` is set to -1. This config is used to evaluate checkpoints at a certain frequency of training steps. +# When it is set to -1, it deactivates evaluation. This is because real-world evaluation is done through our `control_robot.py` script. +# Look at the documentation in header of `control_robot.py` for more information on how to collect data , train and evaluate a policy. # -# Example of usage for training: +# Example of usage for training and inference with `control_robot.py`: # ```bash # python lerobot/scripts/train.py \ -# policy=act_real \ +# policy=act_aloha_real \ +# env=aloha_real +# ``` +# +# Example of usage for training and inference with [Dora-rs](https://github.com/dora-rs/dora-lerobot): +# ```bash +# python lerobot/scripts/train.py \ +# policy=act_aloha_real \ # env=dora_aloha_real # ``` @@ -36,10 +42,11 @@ override_dataset_stats: std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) training: - offline_steps: 100000 + offline_steps: 80000 online_steps: 0 eval_freq: -1 - save_freq: 20000 + save_freq: 10000 + log_freq: 100 save_checkpoint: true batch_size: 8 @@ -62,7 +69,7 @@ policy: # Input / output structure. n_obs_steps: 1 - chunk_size: 100 # chunk_size + chunk_size: 100 n_action_steps: 100 input_shapes: diff --git a/lerobot/configs/policy/act_koch_real.yaml b/lerobot/configs/policy/act_koch_real.yaml index fd4bf3b59..6ddebab14 100644 --- a/lerobot/configs/policy/act_koch_real.yaml +++ b/lerobot/configs/policy/act_koch_real.yaml @@ -95,7 +95,7 @@ policy: n_vae_encoder_layers: 4 # Inference. - temporal_ensemble_momentum: null + temporal_ensemble_coeff: null # Training and loss computation. dropout: 0.1 diff --git a/lerobot/configs/policy/act_real_no_state.yaml b/lerobot/configs/policy/act_moss_real.yaml similarity index 57% rename from lerobot/configs/policy/act_real_no_state.yaml rename to lerobot/configs/policy/act_moss_real.yaml index 082610503..d996c3597 100644 --- a/lerobot/configs/policy/act_real_no_state.yaml +++ b/lerobot/configs/policy/act_moss_real.yaml @@ -1,43 +1,37 @@ # @package _global_ -# Use `act_real_no_state.yaml` to train on real-world Aloha/Aloha2 datasets when cameras are moving (e.g. wrist cameras) -# Compared to `act_real.yaml`, it is camera only and does not use the state as input which is vector of robot joint positions. -# We validated experimentaly that not using state reaches better success rate. Our hypothesis is that `act_real.yaml` might -# overfits to the state, because the images are more complex to learn from since they are moving. +# Use `act_koch_real.yaml` to train on real-world datasets collected on Alexander Koch's robots. +# Compared to `act.yaml`, it contains 2 cameras (i.e. laptop, phone) instead of 1 camera (i.e. top). +# Also, `training.eval_freq` is set to -1. This config is used to evaluate checkpoints at a certain frequency of training steps. +# When it is set to -1, it deactivates evaluation. 
This is because real-world evaluation is done through our `control_robot.py` script. +# Look at the documentation in header of `control_robot.py` for more information on how to collect data , train and evaluate a policy. # # Example of usage for training: # ```bash # python lerobot/scripts/train.py \ -# policy=act_real_no_state \ -# env=dora_aloha_real +# policy=act_koch_real \ +# env=koch_real # ``` seed: 1000 -dataset_repo_id: lerobot/aloha_static_vinh_cup +dataset_repo_id: lerobot/moss_pick_place_lego override_dataset_stats: - observation.images.cam_right_wrist: + observation.images.laptop: # stats from imagenet, since we use a pretrained vision model mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - observation.images.cam_left_wrist: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - observation.images.cam_high: - # stats from imagenet, since we use a pretrained vision model - mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) - std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) - observation.images.cam_low: + observation.images.phone: # stats from imagenet, since we use a pretrained vision model mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) training: - offline_steps: 100000 + offline_steps: 80000 online_steps: 0 eval_freq: -1 - save_freq: 20000 + save_freq: 10000 + log_freq: 100 save_checkpoint: true batch_size: 8 @@ -60,24 +54,22 @@ policy: # Input / output structure. n_obs_steps: 1 - chunk_size: 100 # chunk_size + chunk_size: 100 n_action_steps: 100 input_shapes: # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env? - observation.images.cam_right_wrist: [3, 480, 640] - observation.images.cam_left_wrist: [3, 480, 640] - observation.images.cam_high: [3, 480, 640] - observation.images.cam_low: [3, 480, 640] + observation.images.laptop: [3, 480, 640] + observation.images.phone: [3, 480, 640] + observation.state: ["${env.state_dim}"] output_shapes: action: ["${env.action_dim}"] # Normalization / Unnormalization input_normalization_modes: - observation.images.cam_right_wrist: mean_std - observation.images.cam_left_wrist: mean_std - observation.images.cam_high: mean_std - observation.images.cam_low: mean_std + observation.images.laptop: mean_std + observation.images.phone: mean_std + observation.state: mean_std output_normalization_modes: action: mean_std diff --git a/lerobot/configs/policy/act_pusht.yaml b/lerobot/configs/policy/act_pusht.yaml new file mode 100644 index 000000000..4963e11c0 --- /dev/null +++ b/lerobot/configs/policy/act_pusht.yaml @@ -0,0 +1,87 @@ +# @package _global_ + +# Change the seed to match what PushT eval uses +# (to avoid evaluating on seeds used for generating the training data). +seed: 100000 +# Change the dataset repository to the PushT one. 
+dataset_repo_id: lerobot/pusht + +override_dataset_stats: + observation.image: + # stats from imagenet, since we use a pretrained vision model + mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) + std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) + +training: + offline_steps: 80000 + online_steps: 0 + eval_freq: 10000 + save_freq: 100000 + log_freq: 250 + save_model: true + + batch_size: 8 + lr: 1e-5 + lr_backbone: 1e-5 + weight_decay: 1e-4 + grad_clip_norm: 10 + online_steps_between_rollouts: 1 + + delta_timestamps: + action: "[i / ${fps} for i in range(${policy.chunk_size})]" + +eval: + n_episodes: 50 + batch_size: 50 + +# See `configuration_act.py` for more details. +policy: + name: act + + # Input / output structure. + n_obs_steps: 1 + chunk_size: 100 # chunk_size + n_action_steps: 100 + + input_shapes: + observation.image: [3, 96, 96] + observation.state: ["${env.state_dim}"] + output_shapes: + action: ["${env.action_dim}"] + + # Normalization / Unnormalization + input_normalization_modes: + observation.image: mean_std + # Use min_max normalization just because it's more standard. + observation.state: min_max + output_normalization_modes: + # Use min_max normalization just because it's more standard. + action: min_max + + # Architecture. + # Vision backbone. + vision_backbone: resnet18 + pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1 + replace_final_stride_with_dilation: false + # Transformer layers. + pre_norm: false + dim_model: 512 + n_heads: 8 + dim_feedforward: 3200 + feedforward_activation: relu + n_encoder_layers: 4 + # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code + # that means only the first layer is used. Here we match the original implementation by setting this to 1. + # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521. + n_decoder_layers: 1 + # VAE. + use_vae: true + latent_dim: 32 + n_vae_encoder_layers: 4 + + # Inference. + temporal_ensemble_coeff: null + + # Training and loss computation. + dropout: 0.1 + kl_weight: 10.0 diff --git a/lerobot/configs/policy/act_so100_real.yaml b/lerobot/configs/policy/act_so100_real.yaml new file mode 100644 index 000000000..cf5b1f147 --- /dev/null +++ b/lerobot/configs/policy/act_so100_real.yaml @@ -0,0 +1,102 @@ +# @package _global_ + +# Use `act_koch_real.yaml` to train on real-world datasets collected on Alexander Koch's robots. +# Compared to `act.yaml`, it contains 2 cameras (i.e. laptop, phone) instead of 1 camera (i.e. top). +# Also, `training.eval_freq` is set to -1. This config is used to evaluate checkpoints at a certain frequency of training steps. +# When it is set to -1, it deactivates evaluation. This is because real-world evaluation is done through our `control_robot.py` script. +# Look at the documentation in header of `control_robot.py` for more information on how to collect data , train and evaluate a policy. 
+# +# Example of usage for training: +# ```bash +# python lerobot/scripts/train.py \ +# policy=act_koch_real \ +# env=koch_real +# ``` + +seed: 1000 +dataset_repo_id: lerobot/so100_pick_place_lego + +override_dataset_stats: + observation.images.laptop: + # stats from imagenet, since we use a pretrained vision model + mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) + std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) + observation.images.phone: + # stats from imagenet, since we use a pretrained vision model + mean: [[[0.485]], [[0.456]], [[0.406]]] # (c,1,1) + std: [[[0.229]], [[0.224]], [[0.225]]] # (c,1,1) + +training: + offline_steps: 80000 + online_steps: 0 + eval_freq: -1 + save_freq: 10000 + log_freq: 100 + save_checkpoint: true + + batch_size: 8 + lr: 1e-5 + lr_backbone: 1e-5 + weight_decay: 1e-4 + grad_clip_norm: 10 + online_steps_between_rollouts: 1 + + delta_timestamps: + action: "[i / ${fps} for i in range(${policy.chunk_size})]" + +eval: + n_episodes: 50 + batch_size: 50 + +# See `configuration_act.py` for more details. +policy: + name: act + + # Input / output structure. + n_obs_steps: 1 + chunk_size: 100 + n_action_steps: 100 + + input_shapes: + # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env? + observation.images.laptop: [3, 480, 640] + observation.images.phone: [3, 480, 640] + observation.state: ["${env.state_dim}"] + output_shapes: + action: ["${env.action_dim}"] + + # Normalization / Unnormalization + input_normalization_modes: + observation.images.laptop: mean_std + observation.images.phone: mean_std + observation.state: mean_std + output_normalization_modes: + action: mean_std + + # Architecture. + # Vision backbone. + vision_backbone: resnet18 + pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1 + replace_final_stride_with_dilation: false + # Transformer layers. + pre_norm: false + dim_model: 512 + n_heads: 8 + dim_feedforward: 3200 + feedforward_activation: relu + n_encoder_layers: 4 + # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code + # that means only the first layer is used. Here we match the original implementation by setting this to 1. + # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521. + n_decoder_layers: 1 + # VAE. + use_vae: true + latent_dim: 32 + n_vae_encoder_layers: 4 + + # Inference. + temporal_ensemble_coeff: null + + # Training and loss computation. + dropout: 0.1 + kl_weight: 10.0 diff --git a/lerobot/configs/robot/aloha.yaml b/lerobot/configs/robot/aloha.yaml index 938fa2e3d..d8bca515f 100644 --- a/lerobot/configs/robot/aloha.yaml +++ b/lerobot/configs/robot/aloha.yaml @@ -1,11 +1,13 @@ -# Aloha: A Low-Cost Hardware for Bimanual Teleoperation +# [Aloha: A Low-Cost Hardware for Bimanual Teleoperation](https://www.trossenrobotics.com/aloha-stationary) # https://aloha-2.github.io -# https://www.trossenrobotics.com/aloha-stationary # Requires installing extras packages # With pip: `pip install -e ".[dynamixel intelrealsense]"` # With poetry: `poetry install --sync --extras "dynamixel intelrealsense"` +# See [tutorial](https://github.com/huggingface/lerobot/blob/main/examples/9_use_aloha.md) + + _target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot robot_type: aloha # Specific to Aloha, LeRobot comes with default calibration files. 
Assuming the motors have been @@ -91,25 +93,25 @@ follower_arms: cameras: cam_high: _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera - camera_index: 128422271347 + serial_number: 128422271347 fps: 30 width: 640 height: 480 cam_low: _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera - camera_index: 130322270656 + serial_number: 130322270656 fps: 30 width: 640 height: 480 cam_left_wrist: _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera - camera_index: 218622272670 + serial_number: 218622272670 fps: 30 width: 640 height: 480 cam_right_wrist: _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera - camera_index: 130322272300 + serial_number: 130322272300 fps: 30 width: 640 height: 480 diff --git a/lerobot/configs/robot/koch_bimanual.yaml b/lerobot/configs/robot/koch_bimanual.yaml index 7f8138675..b551d15de 100644 --- a/lerobot/configs/robot/koch_bimanual.yaml +++ b/lerobot/configs/robot/koch_bimanual.yaml @@ -1,5 +1,5 @@ _target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot -robot_type: koch +robot_type: koch_bimanual calibration_dir: .cache/calibration/koch_bimanual # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. diff --git a/lerobot/configs/robot/moss.yaml b/lerobot/configs/robot/moss.yaml new file mode 100644 index 000000000..8a9019851 --- /dev/null +++ b/lerobot/configs/robot/moss.yaml @@ -0,0 +1,56 @@ +# [Moss v1 robot arm](https://github.com/jess-moss/moss-robot-arms) + +# Requires installing extras packages +# With pip: `pip install -e ".[feetech]"` +# With poetry: `poetry install --sync --extras "feetech"` + +# See [tutorial](https://github.com/huggingface/lerobot/blob/main/examples/11_use_moss.md) + +_target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot +robot_type: moss +calibration_dir: .cache/calibration/moss + +# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. +# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as +# the number of motors in your follower arms. 
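Editor's note: `max_relative_target: null` on the next line disables the safety clamp for this arm. When it is set to a positive scalar (or a per-motor list), `ensure_safe_goal_position`, shown near the top of this section, limits how far each motor may move in a single command. A minimal sketch of the clamping idea, assuming 1D float tensors and a scalar limit (illustrative, not the function's verbatim body):

```python
import torch


def clamp_goal(goal_pos: torch.Tensor, present_pos: torch.Tensor, max_relative_target: float) -> torch.Tensor:
    # Limit the commanded displacement to +/- max_relative_target around the present position.
    diff = goal_pos - present_pos
    safe_diff = torch.clamp(diff, -max_relative_target, max_relative_target)
    return present_pos + safe_diff


present = torch.tensor([2048.0, 2048.0, 2048.0])
goal = torch.tensor([2300.0, 2000.0, 2050.0])
print(clamp_goal(goal, present, max_relative_target=100.0))  # tensor([2148., 2000., 2050.])
```

With a per-motor list, the same clamp would be applied element-wise.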
+max_relative_target: null + +leader_arms: + main: + _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus + port: /dev/tty.usbmodem58760431091 + motors: + # name: (index, model) + shoulder_pan: [1, "sts3215"] + shoulder_lift: [2, "sts3215"] + elbow_flex: [3, "sts3215"] + wrist_flex: [4, "sts3215"] + wrist_roll: [5, "sts3215"] + gripper: [6, "sts3215"] + +follower_arms: + main: + _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus + port: /dev/tty.usbmodem58760431191 + motors: + # name: (index, model) + shoulder_pan: [1, "sts3215"] + shoulder_lift: [2, "sts3215"] + elbow_flex: [3, "sts3215"] + wrist_flex: [4, "sts3215"] + wrist_roll: [5, "sts3215"] + gripper: [6, "sts3215"] + +cameras: + laptop: + _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera + camera_index: 0 + fps: 30 + width: 640 + height: 480 + phone: + _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera + camera_index: 1 + fps: 30 + width: 640 + height: 480 diff --git a/lerobot/configs/robot/so100.yaml b/lerobot/configs/robot/so100.yaml new file mode 100644 index 000000000..ec6f3e3fe --- /dev/null +++ b/lerobot/configs/robot/so100.yaml @@ -0,0 +1,56 @@ +# [SO-100 robot arm](https://github.com/TheRobotStudio/SO-ARM100) + +# Requires installing extras packages +# With pip: `pip install -e ".[feetech]"` +# With poetry: `poetry install --sync --extras "feetech"` + +# See [tutorial](https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md) + +_target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot +robot_type: so100 +calibration_dir: .cache/calibration/so100 + +# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. +# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as +# the number of motors in your follower arms. 
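Editor's note: the SO-100 config continues below with the same structure as the Moss config above. Like every robot YAML added by this patch, it is resolved through its `_target_` entry; the actual loading path goes through `init_hydra_config` and `make_robot`, whose bodies are not shown here, so the following is only a hedged sketch of that pattern:

```python
# Hedged sketch: building a robot from a `_target_` config. `make_robot` in this
# patch is assumed to do something equivalent; this is not its actual implementation.
import hydra
from omegaconf import OmegaConf

cfg = OmegaConf.load("lerobot/configs/robot/so100.yaml")
robot = hydra.utils.instantiate(cfg)  # recursively builds the FeetechMotorsBus arms and OpenCVCamera objects
robot.connect()
robot.disconnect()
```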
+max_relative_target: null + +leader_arms: + main: + _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus + port: /dev/tty.usbmodem585A0077581 + motors: + # name: (index, model) + shoulder_pan: [1, "sts3215"] + shoulder_lift: [2, "sts3215"] + elbow_flex: [3, "sts3215"] + wrist_flex: [4, "sts3215"] + wrist_roll: [5, "sts3215"] + gripper: [6, "sts3215"] + +follower_arms: + main: + _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus + port: /dev/tty.usbmodem585A0080971 + motors: + # name: (index, model) + shoulder_pan: [1, "sts3215"] + shoulder_lift: [2, "sts3215"] + elbow_flex: [3, "sts3215"] + wrist_flex: [4, "sts3215"] + wrist_roll: [5, "sts3215"] + gripper: [6, "sts3215"] + +cameras: + laptop: + _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera + camera_index: 0 + fps: 30 + width: 640 + height: 480 + phone: + _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera + camera_index: 1 + fps: 30 + width: 640 + height: 480 diff --git a/lerobot/configs/robot/stretch.yaml b/lerobot/configs/robot/stretch.yaml new file mode 100644 index 000000000..e29966b6f --- /dev/null +++ b/lerobot/configs/robot/stretch.yaml @@ -0,0 +1,33 @@ +# [Stretch3 from Hello Robot](https://hello-robot.com/stretch-3-product) + +# Requires installing extras packages +# With pip: `pip install -e ".[stretch]"` +# With poetry: `poetry install --sync --extras "stretch"` + +# See [tutorial](https://github.com/huggingface/lerobot/blob/main/examples/8_use_stretch.md) + + +_target_: lerobot.common.robot_devices.robots.stretch.StretchRobot +robot_type: stretch3 + +cameras: + navigation: + _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera + camera_index: /dev/hello-nav-head-camera + fps: 10 + width: 1280 + height: 720 + rotation: -90 + head: + _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera.init_from_name + name: Intel RealSense D435I + fps: 30 + width: 640 + height: 480 + rotation: 90 + wrist: + _target_: lerobot.common.robot_devices.cameras.intelrealsense.IntelRealSenseCamera.init_from_name + name: Intel RealSense D405 + fps: 30 + width: 640 + height: 480 diff --git a/lerobot/scripts/configure_motor.py b/lerobot/scripts/configure_motor.py new file mode 100644 index 000000000..18707397f --- /dev/null +++ b/lerobot/scripts/configure_motor.py @@ -0,0 +1,145 @@ +""" +This script configure a single motor at a time to a given ID and baudrate. + +Example of usage: +```bash +python lerobot/scripts/configure_motor.py \ + --port /dev/tty.usbmodem585A0080521 \ + --brand feetech \ + --model sts3215 \ + --baudrate 1000000 \ + --ID 1 +``` +""" + +import argparse +import time + + +def configure_motor(port, brand, model, motor_idx_des, baudrate_des): + if brand == "feetech": + from lerobot.common.robot_devices.motors.feetech import MODEL_BAUDRATE_TABLE + from lerobot.common.robot_devices.motors.feetech import ( + SCS_SERIES_BAUDRATE_TABLE as SERIES_BAUDRATE_TABLE, + ) + from lerobot.common.robot_devices.motors.feetech import FeetechMotorsBus as MotorsBusClass + elif brand == "dynamixel": + from lerobot.common.robot_devices.motors.dynamixel import MODEL_BAUDRATE_TABLE + from lerobot.common.robot_devices.motors.dynamixel import ( + X_SERIES_BAUDRATE_TABLE as SERIES_BAUDRATE_TABLE, + ) + from lerobot.common.robot_devices.motors.dynamixel import DynamixelMotorsBus as MotorsBusClass + else: + raise ValueError( + f"Currently we do not support this motor brand: {brand}. We currently support feetech and dynamixel motors." 
+ ) + + # Check if the provided model exists in the model_baud_rate_table + if model not in MODEL_BAUDRATE_TABLE: + raise ValueError( + f"Invalid model '{model}' for brand '{brand}'. Supported models: {list(MODEL_BAUDRATE_TABLE.keys())}" + ) + + # Setup motor names, indices, and models + motor_name = "motor" + motor_index_arbitrary = motor_idx_des # Use the motor ID passed via argument + motor_model = model # Use the motor model passed via argument + + # Initialize the MotorBus with the correct port and motor configurations + motor_bus = MotorsBusClass(port=port, motors={motor_name: (motor_index_arbitrary, motor_model)}) + + # Try to connect to the motor bus and handle any connection-specific errors + try: + motor_bus.connect() + print(f"Connected on port {motor_bus.port}") + except OSError as e: + print(f"Error occurred when connecting to the motor bus: {e}") + return + + # Motor bus is connected, proceed with the rest of the operations + try: + print("Scanning all baudrates and motor indices") + all_baudrates = set(SERIES_BAUDRATE_TABLE.values()) + motor_index = -1 # Set the motor index to an out-of-range value. + + for baudrate in all_baudrates: + motor_bus.set_bus_baudrate(baudrate) + present_ids = motor_bus.find_motor_indices(list(range(1, 10))) + if len(present_ids) > 1: + raise ValueError( + "Error: More than one motor ID detected. This script is designed to only handle one motor at a time. Please disconnect all but one motor." + ) + + if len(present_ids) == 1: + if motor_index != -1: + raise ValueError( + "Error: More than one motor ID detected. This script is designed to only handle one motor at a time. Please disconnect all but one motor." + ) + motor_index = present_ids[0] + + if motor_index == -1: + raise ValueError("No motors detected. Please ensure you have one motor connected.") + + print(f"Motor index found at: {motor_index}") + + if brand == "feetech": + # Allows ID and BAUDRATE to be written in memory + motor_bus.write_with_motor_ids(motor_bus.motor_models, motor_index, "Lock", 0) + + if baudrate != baudrate_des: + print(f"Setting its baudrate to {baudrate_des}") + baudrate_idx = list(SERIES_BAUDRATE_TABLE.values()).index(baudrate_des) + + # The write can fail, so we allow retries + motor_bus.write_with_motor_ids(motor_bus.motor_models, motor_index, "Baud_Rate", baudrate_idx) + time.sleep(0.5) + motor_bus.set_bus_baudrate(baudrate_des) + present_baudrate_idx = motor_bus.read_with_motor_ids( + motor_bus.motor_models, motor_index, "Baud_Rate", num_retry=2 + ) + + if present_baudrate_idx != baudrate_idx: + raise OSError("Failed to write baudrate.") + + print(f"Setting its index to desired index {motor_idx_des}") + motor_bus.write_with_motor_ids(motor_bus.motor_models, motor_index, "Lock", 0) + motor_bus.write_with_motor_ids(motor_bus.motor_models, motor_index, "ID", motor_idx_des) + + present_idx = motor_bus.read_with_motor_ids(motor_bus.motor_models, motor_idx_des, "ID", num_retry=2) + if present_idx != motor_idx_des: + raise OSError("Failed to write index.") + + if brand == "feetech": + # Set Maximum_Acceleration to 254 to speedup acceleration and deceleration of + # the motors. 
Note: this configuration is not in the official STS3215 Memory Table + motor_bus.write("Lock", 0) + motor_bus.write("Maximum_Acceleration", 254) + + motor_bus.write("Goal_Position", 2048) + time.sleep(4) + print("Present Position", motor_bus.read("Present_Position")) + + motor_bus.write("Offset", 0) + time.sleep(4) + print("Offset", motor_bus.read("Offset")) + + except Exception as e: + print(f"Error occurred during motor configuration: {e}") + + finally: + motor_bus.disconnect() + print("Disconnected from motor bus.") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--port", type=str, required=True, help="Motors bus port (e.g. dynamixel,feetech)") + parser.add_argument("--brand", type=str, required=True, help="Motor brand (e.g. dynamixel,feetech)") + parser.add_argument("--model", type=str, required=True, help="Motor model (e.g. xl330-m077,sts3215)") + parser.add_argument("--ID", type=int, required=True, help="Desired ID of the current motor (e.g. 1,2,3)") + parser.add_argument( + "--baudrate", type=int, default=1000000, help="Desired baudrate for the motor (default: 1000000)" + ) + args = parser.parse_args() + + configure_motor(args.port, args.brand, args.model, args.ID, args.baudrate) diff --git a/lerobot/scripts/control_robot.py b/lerobot/scripts/control_robot.py index a6506a3fe..563023f48 100644 --- a/lerobot/scripts/control_robot.py +++ b/lerobot/scripts/control_robot.py @@ -29,7 +29,6 @@ ```bash python lerobot/scripts/control_robot.py record \ --fps 30 \ - --root tmp/data \ --repo-id $USER/koch_test \ --num-episodes 1 \ --run-compute-stats 0 @@ -38,7 +37,6 @@ - Visualize dataset: ```bash python lerobot/scripts/visualize_dataset.py \ - --root tmp/data \ --repo-id $USER/koch_test \ --episode-index 0 ``` @@ -47,7 +45,6 @@ ```bash python lerobot/scripts/control_robot.py replay \ --fps 30 \ - --root tmp/data \ --repo-id $USER/koch_test \ --episode 0 ``` @@ -57,7 +54,6 @@ ```bash python lerobot/scripts/control_robot.py record \ --fps 30 \ - --root data \ --repo-id $USER/koch_pick_place_lego \ --num-episodes 50 \ --warmup-time-s 2 \ @@ -77,7 +73,7 @@ - Train on this dataset with the ACT policy: ```bash -DATA_DIR=data python lerobot/scripts/train.py \ +python lerobot/scripts/train.py \ policy=act_koch_real \ env=koch_real \ dataset_repo_id=$USER/koch_pick_place_lego \ @@ -88,7 +84,6 @@ ```bash python lerobot/scripts/control_robot.py record \ --fps 30 \ - --root data \ --repo-id $USER/eval_act_koch_real \ --num-episodes 10 \ --warmup-time-s 2 \ @@ -99,161 +94,51 @@ """ import argparse -import concurrent.futures -import json import logging -import os -import platform -import shutil import time -import traceback -from contextlib import nullcontext -from functools import cache from pathlib import Path - -import cv2 -import torch -import tqdm -from omegaconf import DictConfig -from PIL import Image -from termcolor import colored +from typing import List # from safetensors.torch import load_file, save_file -from lerobot.common.datasets.compute_stats import compute_stats -from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset -from lerobot.common.datasets.push_dataset_to_hub.aloha_hdf5_format import to_hf_dataset -from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes, get_default_encoding -from lerobot.common.datasets.utils import calculate_episode_data_index, create_branch -from lerobot.common.datasets.video_utils import encode_video_frames -from lerobot.common.policies.factory import make_policy 
-from lerobot.common.robot_devices.robots.factory import make_robot -from lerobot.common.robot_devices.robots.utils import Robot, get_arm_id -from lerobot.common.robot_devices.utils import busy_wait -from lerobot.common.utils.utils import get_safe_torch_device, init_hydra_config, init_logging, set_global_seed -from lerobot.scripts.eval import get_pretrained_policy_path -from lerobot.scripts.push_dataset_to_hub import ( - push_dataset_card_to_hub, - push_meta_data_to_hub, - push_videos_to_hub, - save_meta_data, +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset +from lerobot.common.robot_devices.control_utils import ( + control_loop, + has_method, + init_keyboard_listener, + init_policy, + log_control_info, + record_episode, + reset_environment, + sanity_check_dataset_name, + sanity_check_dataset_robot_compatibility, + stop_recording, + warmup_record, ) - -######################################################################################## -# Utilities -######################################################################################## - - -def say(text, blocking=False): - # Check if mac, linux, or windows. - if platform.system() == "Darwin": - cmd = f'say "{text}"' - elif platform.system() == "Linux": - cmd = f'spd-say "{text}"' - elif platform.system() == "Windows": - cmd = ( - 'PowerShell -Command "Add-Type -AssemblyName System.Speech; ' - f"(New-Object System.Speech.Synthesis.SpeechSynthesizer).Speak('{text}')\"" - ) - - if not blocking and platform.system() in ["Darwin", "Linux"]: - # TODO(rcadene): Make it work for Windows - # Use the ampersand to run command in the background - cmd += " &" - - os.system(cmd) - - -def save_image(img_tensor, key, frame_index, episode_index, videos_dir): - img = Image.fromarray(img_tensor.numpy()) - path = videos_dir / f"{key}_episode_{episode_index:06d}" / f"frame_{frame_index:06d}.png" - path.parent.mkdir(parents=True, exist_ok=True) - img.save(str(path), quality=100) - - -def none_or_int(value): - if value == "None": - return None - return int(value) - - -def log_control_info(robot, dt_s, episode_index=None, frame_index=None, fps=None): - log_items = [] - if episode_index is not None: - log_items.append(f"ep:{episode_index}") - if frame_index is not None: - log_items.append(f"frame:{frame_index}") - - def log_dt(shortname, dt_val_s): - nonlocal log_items, fps - info_str = f"{shortname}:{dt_val_s * 1000:5.2f} ({1/ dt_val_s:3.1f}hz)" - if fps is not None: - actual_fps = 1 / dt_val_s - if actual_fps < fps - 1: - info_str = colored(info_str, "yellow") - log_items.append(info_str) - - # total step time displayed in milliseconds and its frequency - log_dt("dt", dt_s) - - for name in robot.leader_arms: - key = f"read_leader_{name}_pos_dt_s" - if key in robot.logs: - log_dt("dtRlead", robot.logs[key]) - - for name in robot.follower_arms: - key = f"write_follower_{name}_goal_pos_dt_s" - if key in robot.logs: - log_dt("dtWfoll", robot.logs[key]) - - key = f"read_follower_{name}_pos_dt_s" - if key in robot.logs: - log_dt("dtRfoll", robot.logs[key]) - - for name in robot.cameras: - key = f"read_camera_{name}_dt_s" - if key in robot.logs: - log_dt(f"dtR{name}", robot.logs[key]) - - info_str = " ".join(log_items) - logging.info(info_str) - - -@cache -def is_headless(): - """Detects if python is running without a monitor.""" - try: - import pynput # noqa - - return False - except Exception: - print( - "Error trying to import pynput. Switching to headless mode. 
" - "As a result, the video stream from the cameras won't be shown, " - "and you won't be able to change the control flow with keyboards. " - "For more info, see traceback below.\n" - ) - traceback.print_exc() - print() - return True - +from lerobot.common.robot_devices.robots.factory import make_robot +from lerobot.common.robot_devices.robots.utils import Robot +from lerobot.common.robot_devices.utils import busy_wait, safe_disconnect +from lerobot.common.utils.utils import init_hydra_config, init_logging, log_say, none_or_int ######################################################################################## # Control modes ######################################################################################## +@safe_disconnect def calibrate(robot: Robot, arms: list[str] | None): - available_arms = [] - for name in robot.follower_arms: - arm_id = get_arm_id(name, "follower") - available_arms.append(arm_id) - for name in robot.leader_arms: - arm_id = get_arm_id(name, "leader") - available_arms.append(arm_id) - - unknown_arms = [arm_id for arm_id in arms if arm_id not in available_arms] - - available_arms_str = " ".join(available_arms) + # TODO(aliberts): move this code in robots' classes + if robot.robot_type.startswith("stretch"): + if not robot.is_connected: + robot.connect() + if not robot.is_homed(): + robot.home() + return + + if arms is None: + arms = robot.available_arms + + unknown_arms = [arm_id for arm_id in arms if arm_id not in robot.available_arms] + available_arms_str = " ".join(robot.available_arms) unknown_arms_str = " ".join(unknown_arms) if arms is None or len(arms) == 0: @@ -285,430 +170,193 @@ def calibrate(robot: Robot, arms: list[str] | None): print("Calibration is done! You can now teleoperate and record datasets!") -def teleoperate(robot: Robot, fps: int | None = None, teleop_time_s: float | None = None): - # TODO(rcadene): Add option to record logs - if not robot.is_connected: - robot.connect() - - start_teleop_t = time.perf_counter() - while True: - start_loop_t = time.perf_counter() - robot.teleop_step() - - if fps is not None: - dt_s = time.perf_counter() - start_loop_t - busy_wait(1 / fps - dt_s) - - dt_s = time.perf_counter() - start_loop_t - log_control_info(robot, dt_s, fps=fps) - - if teleop_time_s is not None and time.perf_counter() - start_teleop_t > teleop_time_s: - break +@safe_disconnect +def teleoperate( + robot: Robot, fps: int | None = None, teleop_time_s: float | None = None, display_cameras: bool = False +): + control_loop( + robot, + control_time_s=teleop_time_s, + fps=fps, + teleoperate=True, + display_cameras=display_cameras, + ) +@safe_disconnect def record( robot: Robot, - policy: torch.nn.Module | None = None, - hydra_cfg: DictConfig | None = None, + root: Path, + repo_id: str, + single_task: str, + pretrained_policy_name_or_path: str | None = None, + policy_overrides: List[str] | None = None, fps: int | None = None, - root="data", - repo_id="lerobot/debug", - warmup_time_s=2, - episode_time_s=10, - reset_time_s=5, - num_episodes=50, - video=True, - run_compute_stats=True, - push_to_hub=True, - tags=None, - num_image_writers_per_camera=4, - force_override=False, -): + warmup_time_s: int | float = 2, + episode_time_s: int | float = 10, + reset_time_s: int | float = 5, + num_episodes: int = 50, + video: bool = True, + run_compute_stats: bool = True, + push_to_hub: bool = True, + tags: list[str] | None = None, + num_image_writer_processes: int = 0, + num_image_writer_threads_per_camera: int = 4, + display_cameras: bool = True, + 
play_sounds: bool = True, + resume: bool = False, + # TODO(rcadene, aliberts): remove local_files_only when refactor with dataset as argument + local_files_only: bool = False, +) -> LeRobotDataset: # TODO(rcadene): Add option to record logs - # TODO(rcadene): Clean this function via decomposition in higher level functions - - _, dataset_name = repo_id.split("/") - if dataset_name.startswith("eval_") and policy is None: - raise ValueError( - f"Your dataset name begins by 'eval_' ({dataset_name}) but no policy is provided ({policy})." + listener = None + events = None + policy = None + device = None + use_amp = None + + if single_task: + task = single_task + else: + raise NotImplementedError("Only single-task recording is supported for now") + + # Load pretrained policy + if pretrained_policy_name_or_path is not None: + policy, policy_fps, device, use_amp = init_policy(pretrained_policy_name_or_path, policy_overrides) + + if fps is None: + fps = policy_fps + logging.warning(f"No fps provided, so using the fps from policy config ({policy_fps}).") + elif fps != policy_fps: + logging.warning( + f"There is a mismatch between the provided fps ({fps}) and the one from policy config ({policy_fps})." + ) + + if resume: + dataset = LeRobotDataset( + repo_id, + root=root, + local_files_only=local_files_only, + ) + dataset.start_image_writer( + num_processes=num_image_writer_processes, + num_threads=num_image_writer_threads_per_camera * len(robot.cameras), + ) + sanity_check_dataset_robot_compatibility(dataset, robot, fps, video) + else: + # Create empty dataset or load existing saved episodes + sanity_check_dataset_name(repo_id, policy) + dataset = LeRobotDataset.create( + repo_id, + fps, + root=root, + robot=robot, + use_videos=video, + image_writer_processes=num_image_writer_processes, + image_writer_threads=num_image_writer_threads_per_camera * len(robot.cameras), ) - - if not video: - raise NotImplementedError() if not robot.is_connected: robot.connect() - local_dir = Path(root) / repo_id - if local_dir.exists() and force_override: - shutil.rmtree(local_dir) + listener, events = init_keyboard_listener() - episodes_dir = local_dir / "episodes" - episodes_dir.mkdir(parents=True, exist_ok=True) + # Execute a few seconds without recording to: + # 1. teleoperate the robot to move it in starting position if no policy provided, + # 2. give times to the robot devices to connect and start synchronizing, + # 3. place the cameras windows on screen + enable_teleoperation = policy is None + log_say("Warmup record", play_sounds) + warmup_record(robot, events, enable_teleoperation, warmup_time_s, display_cameras, fps) - videos_dir = local_dir / "videos" - videos_dir.mkdir(parents=True, exist_ok=True) + if has_method(robot, "teleop_safety_stop"): + robot.teleop_safety_stop() - # Logic to resume data recording - rec_info_path = episodes_dir / "data_recording_info.json" - if rec_info_path.exists(): - with open(rec_info_path) as f: - rec_info = json.load(f) - episode_index = rec_info["last_episode_index"] + 1 - else: - episode_index = 0 + recorded_episodes = 0 + while True: + if recorded_episodes >= num_episodes: + break - if is_headless(): - logging.info( - "Headless environment detected. On-screen cameras display and keyboard inputs will not be available." + # TODO(aliberts): add task prompt for multitask here. Might need to temporarily disable event if + # input() messes with them. 
+ # if multi_task: + # task = input("Enter your task description: ") + + log_say(f"Recording episode {dataset.num_episodes}", play_sounds) + record_episode( + dataset=dataset, + robot=robot, + events=events, + episode_time_s=episode_time_s, + display_cameras=display_cameras, + policy=policy, + device=device, + use_amp=use_amp, + fps=fps, ) - # Allow to exit early while recording an episode or resetting the environment, - # by tapping the right arrow key '->'. This might require a sudo permission - # to allow your terminal to monitor keyboard events. - exit_early = False - rerecord_episode = False - stop_recording = False - - # Only import pynput if not in a headless environment - if not is_headless(): - from pynput import keyboard - - def on_press(key): - nonlocal exit_early, rerecord_episode, stop_recording - try: - if key == keyboard.Key.right: - print("Right arrow key pressed. Exiting loop...") - exit_early = True - elif key == keyboard.Key.left: - print("Left arrow key pressed. Exiting loop and rerecord the last episode...") - rerecord_episode = True - exit_early = True - elif key == keyboard.Key.esc: - print("Escape key pressed. Stopping data recording...") - stop_recording = True - exit_early = True - except Exception as e: - print(f"Error handling key press: {e}") - - listener = keyboard.Listener(on_press=on_press) - listener.start() - - # Load policy if any - if policy is not None: - # Check device is available - device = get_safe_torch_device(hydra_cfg.device, log=True) - - policy.eval() - policy.to(device) - - torch.backends.cudnn.benchmark = True - torch.backends.cuda.matmul.allow_tf32 = True - set_global_seed(hydra_cfg.seed) - - # override fps using policy fps - fps = hydra_cfg.env.fps - - # Execute a few seconds without recording data, to give times - # to the robot devices to connect and start synchronizing. - timestamp = 0 - start_warmup_t = time.perf_counter() - is_warmup_print = False - while timestamp < warmup_time_s: - if not is_warmup_print: - logging.info("Warming up (no data recording)") - say("Warming up") - is_warmup_print = True - - start_loop_t = time.perf_counter() - - if policy is None: - observation, action = robot.teleop_step(record_data=True) - else: - observation = robot.capture_observation() - - if not is_headless(): - image_keys = [key for key in observation if "image" in key] - for key in image_keys: - cv2.imshow(key, cv2.cvtColor(observation[key].numpy(), cv2.COLOR_RGB2BGR)) - cv2.waitKey(1) - - dt_s = time.perf_counter() - start_loop_t - busy_wait(1 / fps - dt_s) - - dt_s = time.perf_counter() - start_loop_t - log_control_info(robot, dt_s, fps=fps) + # Execute a few seconds without recording to give time to manually reset the environment + # Current code logic doesn't allow to teleoperate during this time. 
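Editor's note: the reset phase here is driven by the `events` dictionary returned by `init_keyboard_listener` (keys such as `exit_early`, `rerecord_episode` and `stop_recording` are used below). The bodies of the `control_utils` helpers are not part of this diff, so the following is only a rough sketch of the waiting pattern, not the library's `reset_environment`:

```python
import time


def wait_for_reset(events: dict, reset_time_s: float) -> None:
    # Rough illustration: block for `reset_time_s` seconds unless the operator
    # requests an early exit through the keyboard-listener events.
    start_t = time.perf_counter()
    while time.perf_counter() - start_t < reset_time_s:
        if events.get("exit_early"):
            events["exit_early"] = False
            break
        time.sleep(0.1)
```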
+ # TODO(rcadene): add an option to enable teleoperation during reset + # Skip reset for the last episode to be recorded + if not events["stop_recording"] and ( + (dataset.num_episodes < num_episodes - 1) or events["rerecord_episode"] + ): + log_say("Reset the environment", play_sounds) + reset_environment(robot, events, reset_time_s) + + if events["rerecord_episode"]: + log_say("Re-record episode", play_sounds) + events["rerecord_episode"] = False + events["exit_early"] = False + dataset.clear_episode_buffer() + continue + + dataset.save_episode(task) + recorded_episodes += 1 + + if events["stop_recording"]: + break - timestamp = time.perf_counter() - start_warmup_t - - # Save images using threads to reach high fps (30 and more) - # Using `with` to exist smoothly if an execption is raised. - futures = [] - num_image_writers = num_image_writers_per_camera * len(robot.cameras) - with concurrent.futures.ThreadPoolExecutor(max_workers=num_image_writers) as executor: - # Start recording all episodes - while episode_index < num_episodes: - logging.info(f"Recording episode {episode_index}") - say(f"Recording episode {episode_index}") - ep_dict = {} - frame_index = 0 - timestamp = 0 - start_episode_t = time.perf_counter() - while timestamp < episode_time_s: - start_loop_t = time.perf_counter() - - if policy is None: - observation, action = robot.teleop_step(record_data=True) - else: - observation = robot.capture_observation() - - image_keys = [key for key in observation if "image" in key] - not_image_keys = [key for key in observation if "image" not in key] - - for key in image_keys: - futures += [ - executor.submit( - save_image, observation[key], key, frame_index, episode_index, videos_dir - ) - ] - - if not is_headless(): - image_keys = [key for key in observation if "image" in key] - for key in image_keys: - cv2.imshow(key, cv2.cvtColor(observation[key].numpy(), cv2.COLOR_RGB2BGR)) - cv2.waitKey(1) - - for key in not_image_keys: - if key not in ep_dict: - ep_dict[key] = [] - ep_dict[key].append(observation[key]) - - if policy is not None: - with ( - torch.inference_mode(), - torch.autocast(device_type=device.type) - if device.type == "cuda" and hydra_cfg.use_amp - else nullcontext(), - ): - # Convert to pytorch format: channel first and float32 in [0,1] with batch dimension - for name in observation: - if "image" in name: - observation[name] = observation[name].type(torch.float32) / 255 - observation[name] = observation[name].permute(2, 0, 1).contiguous() - observation[name] = observation[name].unsqueeze(0) - observation[name] = observation[name].to(device) - - # Compute the next action with the policy - # based on the current observation - action = policy.select_action(observation) - - # Remove batch dimension - action = action.squeeze(0) - - # Move to cpu, if not already the case - action = action.to("cpu") - - # Order the robot to move - action_sent = robot.send_action(action) - - # Action can eventually be clipped using `max_relative_target`, - # so action actually sent is saved in the dataset. 
- action = {"action": action_sent} - - for key in action: - if key not in ep_dict: - ep_dict[key] = [] - ep_dict[key].append(action[key]) - - frame_index += 1 - - dt_s = time.perf_counter() - start_loop_t - busy_wait(1 / fps - dt_s) - - dt_s = time.perf_counter() - start_loop_t - log_control_info(robot, dt_s, fps=fps) - - timestamp = time.perf_counter() - start_episode_t - if exit_early: - exit_early = False - break - - if not stop_recording: - # Start resetting env while the executor are finishing - logging.info("Reset the environment") - say("Reset the environment") - - timestamp = 0 - start_vencod_t = time.perf_counter() - - # During env reset we save the data and encode the videos - num_frames = frame_index - - for key in image_keys: - tmp_imgs_dir = videos_dir / f"{key}_episode_{episode_index:06d}" - fname = f"{key}_episode_{episode_index:06d}.mp4" - video_path = local_dir / "videos" / fname - if video_path.exists(): - video_path.unlink() - # Store the reference to the video frame, even tho the videos are not yet encoded - ep_dict[key] = [] - for i in range(num_frames): - ep_dict[key].append({"path": f"videos/{fname}", "timestamp": i / fps}) - - for key in not_image_keys: - ep_dict[key] = torch.stack(ep_dict[key]) - - for key in action: - ep_dict[key] = torch.stack(ep_dict[key]) - - ep_dict["episode_index"] = torch.tensor([episode_index] * num_frames) - ep_dict["frame_index"] = torch.arange(0, num_frames, 1) - ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps - - done = torch.zeros(num_frames, dtype=torch.bool) - done[-1] = True - ep_dict["next.done"] = done - - ep_path = episodes_dir / f"episode_{episode_index}.pth" - print("Saving episode dictionary...") - torch.save(ep_dict, ep_path) - - rec_info = { - "last_episode_index": episode_index, - } - with open(rec_info_path, "w") as f: - json.dump(rec_info, f) - - is_last_episode = stop_recording or (episode_index == (num_episodes - 1)) - - # Wait if necessary - with tqdm.tqdm(total=reset_time_s, desc="Waiting") as pbar: - while timestamp < reset_time_s and not is_last_episode: - time.sleep(1) - timestamp = time.perf_counter() - start_vencod_t - pbar.update(1) - if exit_early: - exit_early = False - break - - # Skip updating episode index which forces re-recording episode - if rerecord_episode: - rerecord_episode = False - continue - - episode_index += 1 - - if is_last_episode: - logging.info("Done recording") - say("Done recording", blocking=True) - if not is_headless(): - listener.stop() - - logging.info("Waiting for threads writing the images on disk to terminate...") - for _ in tqdm.tqdm( - concurrent.futures.as_completed(futures), total=len(futures), desc="Writting images" - ): - pass - break + log_say("Stop recording", play_sounds, blocking=True) + stop_recording(robot, listener, display_cameras) - robot.disconnect() - if not is_headless(): - cv2.destroyAllWindows() - - num_episodes = episode_index - - logging.info("Encoding videos") - say("Encoding videos") - # Use ffmpeg to convert frames stored as png into mp4 videos - for episode_index in tqdm.tqdm(range(num_episodes)): - for key in image_keys: - tmp_imgs_dir = videos_dir / f"{key}_episode_{episode_index:06d}" - fname = f"{key}_episode_{episode_index:06d}.mp4" - video_path = local_dir / "videos" / fname - if video_path.exists(): - # Skip if video is already encoded. Could be the case when resuming data recording. - continue - # note: `encode_video_frames` is a blocking call. 
Making it asynchronous shouldn't speedup encoding, - # since video encoding with ffmpeg is already using multithreading. - encode_video_frames(tmp_imgs_dir, video_path, fps, overwrite=True) - shutil.rmtree(tmp_imgs_dir) - - logging.info("Concatenating episodes") - ep_dicts = [] - for episode_index in tqdm.tqdm(range(num_episodes)): - ep_path = episodes_dir / f"episode_{episode_index}.pth" - ep_dict = torch.load(ep_path) - ep_dicts.append(ep_dict) - data_dict = concatenate_episodes(ep_dicts) - - total_frames = data_dict["frame_index"].shape[0] - data_dict["index"] = torch.arange(0, total_frames, 1) - - hf_dataset = to_hf_dataset(data_dict, video) - episode_data_index = calculate_episode_data_index(hf_dataset) - info = { - "codebase_version": CODEBASE_VERSION, - "fps": fps, - "video": video, - } - if video: - info["encoding"] = get_default_encoding() - - lerobot_dataset = LeRobotDataset.from_preloaded( - repo_id=repo_id, - hf_dataset=hf_dataset, - episode_data_index=episode_data_index, - info=info, - videos_dir=videos_dir, - ) if run_compute_stats: logging.info("Computing dataset statistics") - say("Computing dataset statistics") - stats = compute_stats(lerobot_dataset) - lerobot_dataset.stats = stats - else: - stats = {} - logging.info("Skipping computation of the dataset statistics") - - hf_dataset = hf_dataset.with_format(None) # to remove transforms that cant be saved - hf_dataset.save_to_disk(str(local_dir / "train")) - meta_data_dir = local_dir / "meta_data" - save_meta_data(info, stats, episode_data_index, meta_data_dir) + dataset.consolidate(run_compute_stats) if push_to_hub: - hf_dataset.push_to_hub(repo_id, revision="main") - push_meta_data_to_hub(repo_id, meta_data_dir, revision="main") - push_dataset_card_to_hub(repo_id, revision="main", tags=tags) - if video: - push_videos_to_hub(repo_id, videos_dir, revision="main") - create_branch(repo_id, repo_type="dataset", branch=CODEBASE_VERSION) + dataset.push_to_hub(tags=tags) - logging.info("Exiting") - say("Exiting") - return lerobot_dataset + log_say("Exiting", play_sounds) + return dataset -def replay(robot: Robot, episode: int, fps: int | None = None, root="data", repo_id="lerobot/debug"): +@safe_disconnect +def replay( + robot: Robot, + root: Path, + repo_id: str, + episode: int, + fps: int | None = None, + play_sounds: bool = True, + local_files_only: bool = True, +): + # TODO(rcadene, aliberts): refactor with control_loop, once `dataset` is an instance of LeRobotDataset # TODO(rcadene): Add option to record logs - local_dir = Path(root) / repo_id - if not local_dir.exists(): - raise ValueError(local_dir) - dataset = LeRobotDataset(repo_id, root=root) - items = dataset.hf_dataset.select_columns("action") - from_idx = dataset.episode_data_index["from"][episode].item() - to_idx = dataset.episode_data_index["to"][episode].item() + dataset = LeRobotDataset(repo_id, root=root, episodes=[episode], local_files_only=local_files_only) + actions = dataset.hf_dataset.select_columns("action") if not robot.is_connected: robot.connect() - logging.info("Replaying episode") - say("Replaying episode", blocking=True) - for idx in range(from_idx, to_idx): + log_say("Replaying episode", play_sounds, blocking=True) + for idx in range(dataset.num_frames): start_episode_t = time.perf_counter() - action = items[idx]["action"] + action = actions[idx]["action"] robot.send_action(action) dt_s = time.perf_counter() - start_episode_t @@ -749,15 +397,33 @@ def replay(robot: Robot, episode: int, fps: int | None = None, root="data", repo 
parser_teleop.add_argument( "--fps", type=none_or_int, default=None, help="Frames per second (set to None to disable)" ) + parser_teleop.add_argument( + "--display-cameras", + type=int, + default=1, + help="Display all cameras on screen (set to 1 to display or 0).", + ) parser_record = subparsers.add_parser("record", parents=[base_parser]) + task_args = parser_record.add_mutually_exclusive_group(required=True) parser_record.add_argument( "--fps", type=none_or_int, default=None, help="Frames per second (set to None to disable)" ) + task_args.add_argument( + "--single-task", + type=str, + help="A short but accurate description of the task performed during the recording.", + ) + # TODO(aliberts): add multi-task support + # task_args.add_argument( + # "--multi-task", + # type=int, + # help="You will need to enter the task performed at the start of each episode.", + # ) parser_record.add_argument( "--root", type=Path, - default="data", + default=None, help="Root directory where the dataset will be stored locally at '{root}/{repo_id}' (e.g. 'data/hf_username/dataset_name').", ) parser_record.add_argument( @@ -804,20 +470,31 @@ def replay(robot: Robot, episode: int, fps: int | None = None, root="data", repo help="Add tags to your dataset on the hub.", ) parser_record.add_argument( - "--num-image-writers-per-camera", + "--num-image-writer-processes", + type=int, + default=0, + help=( + "Number of subprocesses handling the saving of frames as PNGs. Set to 0 to use threads only; " + "set to ≥1 to use subprocesses, each using threads to write images. The best number of processes " + "and threads depends on your system. We recommend 4 threads per camera with 0 processes. " + "If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses." + ), + ) + parser_record.add_argument( + "--num-image-writer-threads-per-camera", type=int, default=4, help=( "Number of threads writing the frames as png images on disk, per camera. " - "Too much threads might cause unstable teleoperation fps due to main thread being blocked. " + "Too many threads might cause unstable teleoperation fps due to main thread being blocked. " "Not enough threads might cause low camera fps." ), ) parser_record.add_argument( - "--force-override", + "--resume", type=int, default=0, - help="By default, data recording is resumed. When set to 1, delete the local directory and start data recording from scratch.", + help="Resume recording on an existing dataset.", ) parser_record.add_argument( "-p", @@ -842,7 +519,7 @@ def replay(robot: Robot, episode: int, fps: int | None = None, root="data", repo parser_replay.add_argument( "--root", type=Path, - default="data", + default=None, help="Root directory where the dataset will be stored locally at '{root}/{repo_id}' (e.g. 
'data/hf_username/dataset_name').", ) parser_replay.add_argument( @@ -875,19 +552,7 @@ def replay(robot: Robot, episode: int, fps: int | None = None, root="data", repo teleoperate(robot, **kwargs) elif control_mode == "record": - pretrained_policy_name_or_path = args.pretrained_policy_name_or_path - policy_overrides = args.policy_overrides - del kwargs["pretrained_policy_name_or_path"] - del kwargs["policy_overrides"] - - policy_cfg = None - if pretrained_policy_name_or_path is not None: - pretrained_policy_path = get_pretrained_policy_path(pretrained_policy_name_or_path) - policy_cfg = init_hydra_config(pretrained_policy_path / "config.yaml", policy_overrides) - policy = make_policy(hydra_cfg=policy_cfg, pretrained_policy_name_or_path=pretrained_policy_path) - record(robot, policy, policy_cfg, **kwargs) - else: - record(robot, **kwargs) + record(robot, **kwargs) elif control_mode == "replay": replay(robot, **kwargs) diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index 0aec84720..cb4c1065e 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -75,6 +75,7 @@ init_hydra_config, init_logging, inside_slurm, + is_launched_with_accelerate, set_global_seed, ) @@ -122,7 +123,6 @@ def rollout( # Reset the policy and environments. policy.reset() - observation, info = env.reset(seed=seeds) if render_callback is not None: render_callback(env) @@ -447,6 +447,7 @@ def main( hydra_cfg_path: str | None = None, out_dir: str | None = None, config_overrides: list[str] | None = None, + accelerator: Callable = None, ): assert (pretrained_policy_path is None) ^ (hydra_cfg_path is None) if pretrained_policy_path is not None: @@ -468,11 +469,11 @@ def main( out_dir = f"outputs/eval/{dt.now().strftime('%Y-%m-%d/%H-%M-%S')}_{hydra_cfg.env.name}_{hydra_cfg.policy.name}" # Check device is available - device = get_safe_torch_device(hydra_cfg.device, log=True) + device = get_safe_torch_device(hydra_cfg.device, log=True, accelerator=accelerator) torch.backends.cudnn.benchmark = True torch.backends.cuda.matmul.allow_tf32 = True - set_global_seed(hydra_cfg.seed) + set_global_seed(hydra_cfg.seed, accelerator=accelerator) log_output_dir(out_dir) @@ -484,17 +485,25 @@ def main( policy = make_policy(hydra_cfg=hydra_cfg, pretrained_policy_name_or_path=str(pretrained_policy_path)) else: # Note: We need the dataset stats to pass to the policy's normalization modules. 
- policy = make_policy(hydra_cfg=hydra_cfg, dataset_stats=make_dataset(hydra_cfg).stats) + policy = make_policy(hydra_cfg=hydra_cfg, dataset_stats=make_dataset(hydra_cfg).meta.stats) assert isinstance(policy, nn.Module) policy.eval() - with torch.no_grad(), torch.autocast(device_type=device.type) if hydra_cfg.use_amp else nullcontext(): + if accelerator: + policy = accelerator.prepare_model(policy).to(device) + + with ( + torch.no_grad(), + torch.autocast(device_type=device.type) + if hydra_cfg.use_amp and accelerator is None + else nullcontext(), + ): info = eval_policy( env, - policy, + policy if accelerator is None else accelerator.unwrap_model(policy, keep_fp32_wrapper=True), hydra_cfg.eval.n_episodes, - max_episodes_rendered=10, + max_episodes_rendered=hydra_cfg.eval.max_episodes_rendered, videos_dir=Path(out_dir) / "videos", start_seed=hydra_cfg.seed, ) @@ -576,9 +585,19 @@ def get_pretrained_policy_path(pretrained_policy_name_or_path, revision=None): pretrained_policy_path = get_pretrained_policy_path( args.pretrained_policy_name_or_path, revision=args.revision ) - - main( - pretrained_policy_path=pretrained_policy_path, - out_dir=args.out_dir, - config_overrides=args.overrides, - ) + if is_launched_with_accelerate(): + import accelerate + + accelerator = accelerate.Accelerator() + main( + pretrained_policy_path=pretrained_policy_path, + out_dir=args.out_dir, + config_overrides=args.overrides, + accelerator=accelerator, + ) + else: + main( + pretrained_policy_path=pretrained_policy_path, + out_dir=args.out_dir, + config_overrides=args.overrides, + ) diff --git a/lerobot/scripts/find_motors_bus_port.py b/lerobot/scripts/find_motors_bus_port.py new file mode 100644 index 000000000..67b92ad7d --- /dev/null +++ b/lerobot/scripts/find_motors_bus_port.py @@ -0,0 +1,42 @@ +import os +import time +from pathlib import Path + +from serial.tools import list_ports # Part of pyserial library + + +def find_available_ports(): + if os.name == "nt": # Windows + # List COM ports using pyserial + ports = [port.device for port in list_ports.comports()] + else: # Linux/macOS + # List /dev/tty* ports for Unix-based systems + ports = [str(path) for path in Path("/dev").glob("tty*")] + return ports + + +def find_port(): + print("Finding all available ports for the MotorsBus.") + ports_before = find_available_ports() + print("Ports before disconnecting:", ports_before) + + print("Remove the USB cable from your MotorsBus and press Enter when done.") + input() # Wait for user to disconnect the device + + time.sleep(0.5) # Allow some time for port to be released + ports_after = find_available_ports() + ports_diff = list(set(ports_before) - set(ports_after)) + + if len(ports_diff) == 1: + port = ports_diff[0] + print(f"The port of this MotorsBus is '{port}'") + print("Reconnect the USB cable.") + elif len(ports_diff) == 0: + raise OSError(f"Could not detect the port. No difference was found ({ports_diff}).") + else: + raise OSError(f"Could not detect the port. More than one port was found ({ports_diff}).") + + +if __name__ == "__main__": + # Helper to find the USB port associated with your MotorsBus. 
+ find_port() diff --git a/lerobot/scripts/push_dataset_to_hub.py b/lerobot/scripts/push_dataset_to_hub.py index adc4c72ad..2bb641a4d 100644 --- a/lerobot/scripts/push_dataset_to_hub.py +++ b/lerobot/scripts/push_dataset_to_hub.py @@ -117,10 +117,14 @@ def push_meta_data_to_hub(repo_id: str, meta_data_dir: str | Path, revision: str def push_dataset_card_to_hub( - repo_id: str, revision: str | None, tags: list | None = None, text: str | None = None + repo_id: str, + revision: str | None, + tags: list | None = None, + license: str = "apache-2.0", + **card_kwargs, ): """Creates and pushes a LeRobotDataset Card with appropriate tags to easily find it on the hub.""" - card = create_lerobot_dataset_card(tags=tags, text=text) + card = create_lerobot_dataset_card(tags=tags, license=license, **card_kwargs) card.push_to_hub(repo_id=repo_id, repo_type="dataset", revision=revision) @@ -260,7 +264,7 @@ def push_dataset_to_hub( episode_index = 0 tests_videos_dir = tests_data_dir / repo_id / "videos" tests_videos_dir.mkdir(parents=True, exist_ok=True) - for key in lerobot_dataset.video_frame_keys: + for key in lerobot_dataset.camera_keys: fname = f"{key}_episode_{episode_index:06d}.mp4" shutil.copy(videos_dir / fname, tests_videos_dir / fname) diff --git a/lerobot/scripts/train.py b/lerobot/scripts/train.py index 45807503f..0d23eee6f 100644 --- a/lerobot/scripts/train.py +++ b/lerobot/scripts/train.py @@ -21,6 +21,7 @@ from pathlib import Path from pprint import pformat from threading import Lock +from typing import Callable import hydra import numpy as np @@ -43,10 +44,13 @@ from lerobot.common.policies.utils import get_device_from_parameters from lerobot.common.utils.utils import ( format_big_number, + get_accelerate_config, get_safe_torch_device, init_hydra_config, init_logging, + is_launched_with_accelerate, set_global_seed, + update_omegaconf, ) from lerobot.scripts.eval import eval_policy @@ -84,12 +88,15 @@ def make_optimizer_and_scheduler(cfg, policy): ) from diffusers.optimization import get_scheduler - lr_scheduler = get_scheduler( - cfg.training.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=cfg.training.lr_warmup_steps, - num_training_steps=cfg.training.offline_steps, - ) + if cfg.training.lr_scheduler: + lr_scheduler = get_scheduler( + cfg.training.lr_scheduler, + optimizer=optimizer, + num_warmup_steps=cfg.training.lr_warmup_steps, + num_training_steps=cfg.training.offline_steps, + ) + else: + lr_scheduler = None elif policy.name == "tdmpc": optimizer = torch.optim.Adam(policy.parameters(), cfg.training.lr) lr_scheduler = None @@ -113,32 +120,42 @@ def update_policy( lr_scheduler=None, use_amp: bool = False, lock=None, + accelerator: Callable = None, ): """Returns a dictionary of items for logging.""" start_time = time.perf_counter() device = get_device_from_parameters(policy) policy.train() - with torch.autocast(device_type=device.type) if use_amp else nullcontext(): + with torch.autocast(device_type=device.type) if use_amp and accelerator is None else nullcontext(): output_dict = policy.forward(batch) # TODO(rcadene): policy.unnormalize_outputs(out_dict) loss = output_dict["loss"] - grad_scaler.scale(loss).backward() - - # Unscale the graident of the optimzer's assigned params in-place **prior to gradient clipping**. 
- grad_scaler.unscale_(optimizer) - - grad_norm = torch.nn.utils.clip_grad_norm_( - policy.parameters(), - grad_clip_norm, - error_if_nonfinite=False, - ) + if accelerator: + accelerator.backward(loss) + accelerator.unscale_gradients(optimizer=optimizer) + grad_norm = torch.nn.utils.clip_grad_norm_( + policy.parameters(), + grad_clip_norm, + error_if_nonfinite=False, + ) + optimizer.step() + else: + grad_scaler.scale(loss).backward() + # Unscale the graident of the optimzer's assigned params in-place **prior to gradient clipping**. + grad_scaler.unscale_(optimizer) + + grad_norm = torch.nn.utils.clip_grad_norm_( + policy.parameters(), + grad_clip_norm, + error_if_nonfinite=False, + ) - # Optimizer's gradients are already unscaled, so scaler.step does not unscale them, - # although it still skips optimizer.step() if the gradients contain infs or NaNs. - with lock if lock is not None else nullcontext(): - grad_scaler.step(optimizer) - # Updates the scale for next iteration. - grad_scaler.update() + # Optimizer's gradients are already unscaled, so scaler.step does not unscale them, + # although it still skips optimizer.step() if the gradients contain infs or NaNs. + with lock if lock is not None else nullcontext(): + grad_scaler.step(optimizer) + # Updates the scale for next iteration. + grad_scaler.update() optimizer.zero_grad() @@ -147,7 +164,10 @@ def update_policy( if isinstance(policy, PolicyWithUpdate): # To possibly update an internal buffer (for instance an Exponential Moving Average like in TDMPC). - policy.update() + if accelerator: + accelerator.unwrap_model(policy, keep_fp32_wrapper=True).update() + else: + policy.update() info = { "loss": loss.item(), @@ -161,7 +181,7 @@ def update_policy( return info -def log_train_info(logger: Logger, info, step, cfg, dataset, is_online): +def log_train_info(logger: Logger, info, step, cfg, dataset, is_online, accelerator: Callable = None): loss = info["loss"] grad_norm = info["grad_norm"] lr = info["lr"] @@ -170,10 +190,10 @@ def log_train_info(logger: Logger, info, step, cfg, dataset, is_online): # A sample is an (observation,action) pair, where observation and action # can be on multiple timestamps. In a batch, we have `batch_size`` number of samples. - num_samples = (step + 1) * cfg.training.batch_size - avg_samples_per_ep = dataset.num_samples / dataset.num_episodes + num_samples = (step + 1) * cfg.training.batch_size * (accelerator.num_processes if accelerator else 1) + avg_samples_per_ep = dataset.num_frames / dataset.num_episodes num_episodes = num_samples / avg_samples_per_ep - num_epochs = num_samples / dataset.num_samples + num_epochs = num_samples / dataset.num_frames log_items = [ f"step:{format_big_number(step)}", # number of samples seen during training @@ -200,17 +220,17 @@ def log_train_info(logger: Logger, info, step, cfg, dataset, is_online): logger.log_dict(info, step, mode="train") -def log_eval_info(logger, info, step, cfg, dataset, is_online): +def log_eval_info(logger, info, step, cfg, dataset, is_online, accelerator: Callable = None): eval_s = info["eval_s"] avg_sum_reward = info["avg_sum_reward"] pc_success = info["pc_success"] # A sample is an (observation,action) pair, where observation and action # can be on multiple timestamps. In a batch, we have `batch_size`` number of samples. 
- num_samples = (step + 1) * cfg.training.batch_size - avg_samples_per_ep = dataset.num_samples / dataset.num_episodes + num_samples = (step + 1) * cfg.training.batch_size * (accelerator.num_processes if accelerator else 1) + avg_samples_per_ep = dataset.num_frames / dataset.num_episodes num_episodes = num_samples / avg_samples_per_ep - num_epochs = num_samples / dataset.num_samples + num_epochs = num_samples / dataset.num_frames log_items = [ f"step:{format_big_number(step)}", # number of samples seen during training @@ -234,13 +254,22 @@ def log_eval_info(logger, info, step, cfg, dataset, is_online): logger.log_dict(info, step, mode="eval") -def train(cfg: DictConfig, out_dir: str | None = None, job_name: str | None = None): +def train( + cfg: DictConfig, out_dir: str | None = None, job_name: str | None = None, accelerator: Callable = None +): if out_dir is None: raise NotImplementedError() if job_name is None: raise NotImplementedError() - init_logging() + init_logging(accelerator) + if accelerator: + assert cfg.training.online_steps == 0, "Online training with accelerate is not implemented." + accelerator_config = get_accelerate_config(accelerator) + update_omegaconf(cfg, config_name="accelerator_config", config=accelerator_config) + logging.info( + f"Acccelerate is enabled, training will be launched with the following configuration:\n{pformat(accelerator_config)}" + ) logging.info(pformat(OmegaConf.to_container(cfg))) if cfg.training.online_steps > 0 and isinstance(cfg.dataset_repo_id, ListConfig): @@ -299,12 +328,15 @@ def train(cfg: DictConfig, out_dir: str | None = None, job_name: str | None = No ) # log metrics to terminal and wandb + if accelerator and not accelerator.is_main_process: + # Disable logging on non-main processes. + cfg.wandb.enable = False logger = Logger(cfg, out_dir, wandb_job_name=job_name) - set_global_seed(cfg.seed) + set_global_seed(cfg.seed, accelerator=accelerator) # Check device is available - device = get_safe_torch_device(cfg.device, log=True) + device = get_safe_torch_device(cfg.device, log=True, accelerator=accelerator) torch.backends.cudnn.benchmark = True torch.backends.cuda.matmul.allow_tf32 = True @@ -323,15 +355,16 @@ def train(cfg: DictConfig, out_dir: str | None = None, job_name: str | None = No eval_env = None if cfg.training.eval_freq > 0: logging.info("make_env") - eval_env = make_env(cfg) + eval_env = make_env(cfg, out_dir=out_dir) logging.info("make_policy") policy = make_policy( hydra_cfg=cfg, - dataset_stats=offline_dataset.stats if not cfg.resume else None, + dataset_stats=offline_dataset.meta.stats if not cfg.resume else None, pretrained_policy_name_or_path=str(logger.last_pretrained_model_dir) if cfg.resume else None, ) assert isinstance(policy, nn.Module) + policy.to(device) # Create optimizer and scheduler # Temporary hack to move optimizer out of policy optimizer, lr_scheduler = make_optimizer_and_scheduler(cfg, policy) @@ -349,7 +382,7 @@ def train(cfg: DictConfig, out_dir: str | None = None, job_name: str | None = No logging.info(f"{cfg.env.task=}") logging.info(f"{cfg.training.offline_steps=} ({format_big_number(cfg.training.offline_steps)})") logging.info(f"{cfg.training.online_steps=}") - logging.info(f"{offline_dataset.num_samples=} ({format_big_number(offline_dataset.num_samples)})") + logging.info(f"{offline_dataset.num_frames=} ({format_big_number(offline_dataset.num_frames)})") logging.info(f"{offline_dataset.num_episodes=}") logging.info(f"{num_learnable_params=} ({format_big_number(num_learnable_params)})") 
logging.info(f"{num_total_params=} ({format_big_number(num_total_params)})") @@ -361,17 +394,32 @@ def evaluate_and_checkpoint_if_needed(step, is_online): if cfg.training.eval_freq > 0 and step % cfg.training.eval_freq == 0: logging.info(f"Eval policy at step {step}") - with torch.no_grad(), torch.autocast(device_type=device.type) if cfg.use_amp else nullcontext(): + logging.info(f"max_episodes_rendered {cfg.eval.max_episodes_rendered}") + with ( + torch.no_grad(), + torch.autocast(device_type=device.type) if cfg.use_amp and not accelerator else nullcontext(), + ): assert eval_env is not None + if accelerator: + accelerator.wait_for_everyone() + logging.info("eval_policy") eval_info = eval_policy( eval_env, - policy, + policy if not accelerator else accelerator.unwrap_model(policy, keep_fp32_wrapper=True), cfg.eval.n_episodes, videos_dir=Path(out_dir) / "eval" / f"videos_step_{step_identifier}", - max_episodes_rendered=4, + max_episodes_rendered=cfg.eval.max_episodes_rendered, start_seed=cfg.seed, ) - log_eval_info(logger, eval_info["aggregated"], step, cfg, offline_dataset, is_online=is_online) + log_eval_info( + logger, + eval_info["aggregated"], + step, + cfg, + offline_dataset, + is_online=is_online, + accelerator=accelerator, + ) if cfg.wandb.enable: logger.log_video(eval_info["video_paths"][0], step, mode="eval") logging.info("Resume training") @@ -379,13 +427,14 @@ def evaluate_and_checkpoint_if_needed(step, is_online): if cfg.training.save_checkpoint and ( step % cfg.training.save_freq == 0 or step == cfg.training.offline_steps + cfg.training.online_steps + and (not accelerator or accelerator.is_main_process) ): logging.info(f"Checkpoint policy after step {step}") # Note: Save with step as the identifier, and format it to have at least 6 digits but more if # needed (choose 6 as a minimum for consistency without being overkill). - logger.save_checkpont( + logger.save_checkpoint( step, - policy, + policy if not accelerator else accelerator.unwrap_model(policy), optimizer, lr_scheduler, identifier=step_identifier, @@ -412,6 +461,10 @@ def evaluate_and_checkpoint_if_needed(step, is_online): pin_memory=device.type != "cpu", drop_last=False, ) + if accelerator: + policy, optimizer, dataloader, lr_scheduler = accelerator.prepare( + policy, optimizer, dataloader, lr_scheduler + ) dl_iter = cycle(dataloader) policy.train() @@ -435,12 +488,15 @@ def evaluate_and_checkpoint_if_needed(step, is_online): grad_scaler=grad_scaler, lr_scheduler=lr_scheduler, use_amp=cfg.use_amp, + accelerator=accelerator, ) train_info["dataloading_s"] = dataloading_s - if step % cfg.training.log_freq == 0: - log_train_info(logger, train_info, step, cfg, offline_dataset, is_online=False) + if (step % cfg.training.log_freq == 0) and (not accelerator or accelerator.is_main_process): + log_train_info( + logger, train_info, step, cfg, offline_dataset, is_online=False, accelerator=accelerator + ) # Note: evaluate_and_checkpoint_if_needed happens **after** the `step`th training update has completed, # so we pass in step + 1. 
@@ -573,7 +629,7 @@ def sample_trajectory_and_update_buffer(): online_drop_n_last_frames=cfg.training.get("drop_n_last_frames", 0) + 1, online_sampling_ratio=cfg.training.online_sampling_ratio, ) - sampler.num_samples = len(concat_dataset) + sampler.num_frames = len(concat_dataset) update_online_buffer_s = time.perf_counter() - start_update_buffer_time @@ -613,6 +669,7 @@ def sample_trajectory_and_update_buffer(): lr_scheduler=lr_scheduler, use_amp=cfg.use_amp, lock=lock, + accelerator=accelerator, ) train_info["dataloading_s"] = dataloading_s @@ -649,11 +706,24 @@ def sample_trajectory_and_update_buffer(): @hydra.main(version_base="1.2", config_name="default", config_path="../configs") def train_cli(cfg: dict): - train( - cfg, - out_dir=hydra.core.hydra_config.HydraConfig.get().run.dir, - job_name=hydra.core.hydra_config.HydraConfig.get().job.name, - ) + if is_launched_with_accelerate(): + import accelerate + + # We set step_scheduler_with_optimizer False to prevent accelerate from + # adjusting the lr_scheduler steps based on the num_processes + accelerator = accelerate.Accelerator(step_scheduler_with_optimizer=False) + train( + cfg, + out_dir=hydra.core.hydra_config.HydraConfig.get().run.dir, + job_name=hydra.core.hydra_config.HydraConfig.get().job.name, + accelerator=accelerator, + ) + else: + train( + cfg, + out_dir=hydra.core.hydra_config.HydraConfig.get().run.dir, + job_name=hydra.core.hydra_config.HydraConfig.get().job.name, + ) def train_notebook(out_dir=None, job_name=None, config_name="default", config_path="../configs"): diff --git a/lerobot/scripts/visualize_dataset.py b/lerobot/scripts/visualize_dataset.py index 6cff5752a..cdd5ce605 100644 --- a/lerobot/scripts/visualize_dataset.py +++ b/lerobot/scripts/visualize_dataset.py @@ -100,7 +100,7 @@ def to_hwc_uint8_numpy(chw_float32_torch: torch.Tensor) -> np.ndarray: def visualize_dataset( - repo_id: str, + dataset: LeRobotDataset, episode_index: int, batch_size: int = 32, num_workers: int = 0, @@ -108,7 +108,6 @@ def visualize_dataset( web_port: int = 9090, ws_port: int = 9087, save: bool = False, - root: Path | None = None, output_dir: Path | None = None, ) -> Path | None: if save: @@ -116,8 +115,7 @@ def visualize_dataset( output_dir is not None ), "Set an output directory where to write .rrd files with `--output-dir path/to/directory`." - logging.info("Loading dataset") - dataset = LeRobotDataset(repo_id, root=root) + repo_id = dataset.repo_id logging.info("Loading dataloader") episode_sampler = EpisodeSampler(dataset, episode_index) @@ -153,7 +151,7 @@ def visualize_dataset( rr.set_time_seconds("timestamp", batch["timestamp"][i].item()) # display each camera image - for key in dataset.camera_keys: + for key in dataset.meta.camera_keys: # TODO(rcadene): add `.compress()`? is it lossless? rr.log(key, rr.Image(to_hwc_uint8_numpy(batch[key][i]))) @@ -209,11 +207,17 @@ def main(): required=True, help="Episode to visualize.", ) + parser.add_argument( + "--local-files-only", + type=int, + default=0, + help="Use local files only. By default, this script will try to fetch the dataset from the hub if it exists.", + ) parser.add_argument( "--root", type=Path, default=None, - help="Root directory for a dataset stored locally (e.g. `--root data`). By default, the dataset will be loaded from hugging face cache folder, or downloaded from the hub if available.", + help="Root directory for the dataset stored locally (e.g. `--root data`). 
By default, the dataset will be loaded from hugging face cache folder, or downloaded from the hub if available.", ) parser.add_argument( "--output-dir", @@ -268,7 +272,15 @@ def main(): ) args = parser.parse_args() - visualize_dataset(**vars(args)) + kwargs = vars(args) + repo_id = kwargs.pop("repo_id") + root = kwargs.pop("root") + local_files_only = kwargs.pop("local_files_only") + + logging.info("Loading dataset") + dataset = LeRobotDataset(repo_id, root=root, local_files_only=local_files_only) + + visualize_dataset(dataset, **vars(args)) if __name__ == "__main__": diff --git a/lerobot/scripts/visualize_dataset_html.py b/lerobot/scripts/visualize_dataset_html.py index c035e5626..2c81fbfc5 100644 --- a/lerobot/scripts/visualize_dataset_html.py +++ b/lerobot/scripts/visualize_dataset_html.py @@ -93,18 +93,17 @@ def index(): def show_episode(dataset_namespace, dataset_name, episode_id): dataset_info = { "repo_id": dataset.repo_id, - "num_samples": dataset.num_samples, + "num_samples": dataset.num_frames, "num_episodes": dataset.num_episodes, "fps": dataset.fps, } - video_paths = get_episode_video_paths(dataset, episode_id) - language_instruction = get_episode_language_instruction(dataset, episode_id) + video_paths = [dataset.meta.get_video_file_path(episode_id, key) for key in dataset.meta.video_keys] + tasks = dataset.meta.episodes[episode_id]["tasks"] videos_info = [ - {"url": url_for("static", filename=video_path), "filename": Path(video_path).name} + {"url": url_for("static", filename=video_path), "filename": video_path.name} for video_path in video_paths ] - if language_instruction: - videos_info[0]["language_instruction"] = language_instruction + videos_info[0]["language_instruction"] = tasks ep_csv_url = url_for("static", filename=get_ep_csv_fname(episode_id)) return render_template( @@ -131,16 +130,16 @@ def write_episode_data_csv(output_dir, file_name, episode_index, dataset): from_idx = dataset.episode_data_index["from"][episode_index] to_idx = dataset.episode_data_index["to"][episode_index] - has_state = "observation.state" in dataset.hf_dataset.features - has_action = "action" in dataset.hf_dataset.features + has_state = "observation.state" in dataset.features + has_action = "action" in dataset.features # init header of csv with state and action names header = ["timestamp"] if has_state: - dim_state = len(dataset.hf_dataset["observation.state"][0]) + dim_state = dataset.meta.shapes["observation.state"][0] header += [f"state_{i}" for i in range(dim_state)] if has_action: - dim_action = len(dataset.hf_dataset["action"][0]) + dim_action = dataset.meta.shapes["action"][0] header += [f"action_{i}" for i in range(dim_action)] columns = ["timestamp"] @@ -172,27 +171,12 @@ def get_episode_video_paths(dataset: LeRobotDataset, ep_index: int) -> list[str] first_frame_idx = dataset.episode_data_index["from"][ep_index].item() return [ dataset.hf_dataset.select_columns(key)[first_frame_idx][key]["path"] - for key in dataset.video_frame_keys + for key in dataset.meta.video_keys ] -def get_episode_language_instruction(dataset: LeRobotDataset, ep_index: int) -> list[str]: - # check if the dataset has language instructions - if "language_instruction" not in dataset.hf_dataset.features: - return None - - # get first frame index - first_frame_idx = dataset.episode_data_index["from"][ep_index].item() - - language_instruction = dataset.hf_dataset[first_frame_idx]["language_instruction"] - # TODO (michel-aractingi) hack to get the sentence, some strings in openx are badly stored - # with the 
tf.tensor appearing in the string - return language_instruction.removeprefix("tf.Tensor(b'").removesuffix("', shape=(), dtype=string)") - - def visualize_dataset_html( - repo_id: str, - root: Path | None = None, + dataset: LeRobotDataset, episodes: list[int] = None, output_dir: Path | None = None, serve: bool = True, @@ -202,13 +186,11 @@ def visualize_dataset_html( ) -> Path | None: init_logging() - dataset = LeRobotDataset(repo_id, root=root) - - if not dataset.video: - raise NotImplementedError(f"Image datasets ({dataset.video=}) are currently not supported.") + if len(dataset.meta.image_keys) > 0: + raise NotImplementedError(f"Image keys ({dataset.meta.image_keys=}) are currently not supported.") if output_dir is None: - output_dir = f"outputs/visualize_dataset_html/{repo_id}" + output_dir = f"outputs/visualize_dataset_html/{dataset.repo_id}" output_dir = Path(output_dir) if output_dir.exists(): @@ -225,7 +207,7 @@ def visualize_dataset_html( static_dir.mkdir(parents=True, exist_ok=True) ln_videos_dir = static_dir / "videos" if not ln_videos_dir.exists(): - ln_videos_dir.symlink_to(dataset.videos_dir.resolve()) + ln_videos_dir.symlink_to((dataset.root / "videos").resolve()) template_dir = Path(__file__).resolve().parent.parent / "templates" @@ -252,6 +234,12 @@ def main(): required=True, help="Name of hugging face repositery containing a LeRobotDataset dataset (e.g. `lerobot/pusht` for https://huggingface.co/datasets/lerobot/pusht).", ) + parser.add_argument( + "--local-files-only", + type=int, + default=0, + help="Use local files only. By default, this script will try to fetch the dataset from the hub if it exists.", + ) parser.add_argument( "--root", type=Path, @@ -297,7 +285,13 @@ def main(): ) args = parser.parse_args() - visualize_dataset_html(**vars(args)) + kwargs = vars(args) + repo_id = kwargs.pop("repo_id") + root = kwargs.pop("root") + local_files_only = kwargs.pop("local_files_only") + + dataset = LeRobotDataset(repo_id, root=root, local_files_only=local_files_only) + visualize_dataset_html(dataset, **kwargs) if __name__ == "__main__": diff --git a/lerobot/scripts/visualize_image_transforms.py b/lerobot/scripts/visualize_image_transforms.py index e7cd35827..f9fb5c08a 100644 --- a/lerobot/scripts/visualize_image_transforms.py +++ b/lerobot/scripts/visualize_image_transforms.py @@ -157,7 +157,7 @@ def visualize_transforms(cfg, output_dir: Path, n_examples: int = 5): output_dir.mkdir(parents=True, exist_ok=True) # Get 1st frame from 1st camera of 1st episode - original_frame = dataset[0][dataset.camera_keys[0]] + original_frame = dataset[0][dataset.meta.camera_keys[0]] to_pil(original_frame).save(output_dir / "original_frame.png", quality=100) print("\nOriginal frame saved to:") print(f" {output_dir / 'original_frame.png'}.") diff --git a/lerobot/templates/visualize_dataset_template.html b/lerobot/templates/visualize_dataset_template.html index 4f0bd343e..0fa1e713e 100644 --- a/lerobot/templates/visualize_dataset_template.html +++ b/lerobot/templates/visualize_dataset_template.html @@ -35,7 +35,7 @@

{{ dataset_info.repo_id }}
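Following the `visualize_dataset*` changes above, the visualization entry points now take an already-constructed `LeRobotDataset` instead of a `repo_id`/`root` pair. A minimal sketch of calling the refactored function directly is shown below; the `lerobot/pusht` repo id comes from this diff's help text, and the exact import paths are an assumption based on the repository layout.

```python
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.scripts.visualize_dataset import visualize_dataset

# The caller now builds the dataset itself (mirroring what main() does above)
# and hands it to the visualizer.
dataset = LeRobotDataset(
    "lerobot/pusht",         # any repo in the LeRobotDataset format
    local_files_only=False,  # set True (CLI: --local-files-only 1) to skip hub lookups
)
visualize_dataset(dataset, episode_index=0)
```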
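Earlier in this diff, `control_robot.py`'s `record()` is rebuilt around the dataset API itself (`LeRobotDataset.create`, `save_episode`, `consolidate`, `push_to_hub`) instead of hand-rolled episode, video, and stats handling. The following is a condensed, hypothetical outline of that flow under the signatures shown in those hunks; `record_one_episode` is a stand-in for the teleop/policy loop (`record_episode()` in the real code), and keyword names are assumed.

```python
from typing import Callable

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset


def record_outline(
    robot,
    repo_id: str,
    task: str,
    fps: int,
    num_episodes: int,
    record_one_episode: Callable[[LeRobotDataset], None],  # stand-in for record_episode()
    run_compute_stats: bool = True,
    tags: list[str] | None = None,
) -> LeRobotDataset:
    # Empty dataset; the dataset object now owns image-writer threads/processes and video encoding.
    dataset = LeRobotDataset.create(repo_id, fps, robot=robot, use_videos=True)

    if not robot.is_connected:
        robot.connect()

    for _ in range(num_episodes):
        record_one_episode(dataset)          # fill the episode buffer (frames, images, timestamps)
        dataset.save_episode(task)           # each episode stores its task string

    dataset.consolidate(run_compute_stats)   # replaces the manual ffmpeg encoding / stats code
    dataset.push_to_hub(tags=tags)           # replaces separate meta-data / video / card pushes
    return dataset
```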